test_helpers.cc
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/experimental/micro/test_helpers.h"

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/api/tensor_utils.h"
#include "tensorflow/lite/experimental/micro/micro_utils.h"

namespace tflite {
namespace testing {
namespace {

class StackAllocator : public flatbuffers::Allocator {
 public:
  StackAllocator() : data_(data_backing_), data_size_(0) {}

  uint8_t* allocate(size_t size) override {
    if ((data_size_ + size) > kStackAllocatorSize) {
      // TODO(petewarden): Add error reporting beyond returning null!
      return nullptr;
    }
    uint8_t* result = data_;
    data_ += size;
    data_size_ += size;
    return result;
  }

  void deallocate(uint8_t* p, size_t) override {}

  static StackAllocator& instance() {
    // Avoid using true dynamic memory allocation to be portable to bare
    // metal.
    static char inst_memory[sizeof(StackAllocator)];
    static StackAllocator* inst = new (inst_memory) StackAllocator;
    return *inst;
  }

  static constexpr int kStackAllocatorSize = 4096;

 private:
  uint8_t data_backing_[kStackAllocatorSize];
  uint8_t* data_;
  int data_size_;
};

flatbuffers::FlatBufferBuilder* BuilderInstance() {
  static char inst_memory[sizeof(flatbuffers::FlatBufferBuilder)];
  static flatbuffers::FlatBufferBuilder* inst =
      new (inst_memory) flatbuffers::FlatBufferBuilder(
          StackAllocator::kStackAllocatorSize, &StackAllocator::instance());
  return inst;
}

const Model* BuildMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data[buffer_data_size] = {21};
  constexpr size_t buffers_size = 2;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data, buffer_data_size))};
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 3;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_UINT8, 1,
                   builder->CreateString("test_weight_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
  };
  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {2};
  constexpr size_t operator_inputs_size = 2;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {2};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {CreateOperator(
      *builder, 0,
      builder->CreateVector(operator_inputs, operator_inputs_size),
      builder->CreateVector(operator_outputs, operator_outputs_size),
      BuiltinOptions_NONE)};
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, BuiltinOperator_CUSTOM, "mock_custom",
                               0)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}

}  // namespace

const Model* GetMockModel() {
  static Model* model = nullptr;
  if (!model) {
    model = const_cast<Model*>(BuildMockModel());
  }
  return model;
}

const Tensor* Create1dFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), 0, false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}
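// Usage sketch (illustrative; not part of the original file). The serialized
// tensor built above can drive tests of the flatbuffer-to-runtime conversion
// path:
//
//   const tflite::Tensor* fb_tensor =
//       tflite::testing::Create1dFlatbufferTensor(100);
//
// Note that every helper in this file serializes into the single 4KB
// StackAllocator-backed FlatBufferBuilder returned by BuilderInstance().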
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(*builder, 0, 0, 0, 0,
                                   QuantizationDetails_NONE, 0, 0);
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {size};
  const Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
      TensorType_INT32, 0, builder->CreateString("test_tensor"), quant_params,
      false);
  builder->Finish(tensor_offset);
  void* tensor_pointer = builder->GetBufferPointer();
  const Tensor* tensor = flatbuffers::GetRoot<Tensor>(tensor_pointer);
  return tensor;
}

const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  const flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>
      buffers_offset = builder->CreateVector(buffers, buffers_size);
  builder->Finish(buffers_offset);
  void* buffers_pointer = builder->GetBufferPointer();
  const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* result =
      flatbuffers::GetRoot<flatbuffers::Vector<flatbuffers::Offset<Buffer>>>(
          buffers_pointer);
  return result;
}

int TestStrcmp(const char* a, const char* b) {
  if ((a == nullptr) || (b == nullptr)) {
    return -1;
  }
  while ((*a != 0) && (*a == *b)) {
    a++;
    b++;
  }
  return *reinterpret_cast<const unsigned char*>(a) -
         *reinterpret_cast<const unsigned char*>(b);
}

// Wrapper to forward kernel errors to the interpreter's error reporter.
void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
  ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
  va_list args;
  va_start(args, format);
  error_reporter->Report(format, args);
  va_end(args);
}

// Create a TfLiteIntArray from an array of ints.  The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(const int* int_array) {
  return const_cast<TfLiteIntArray*>(
      reinterpret_cast<const TfLiteIntArray*>(int_array));
}
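// Usage sketch (illustrative; not part of the original file). The cast in
// IntArrayFromInts relies on TfLiteIntArray's layout of a leading int size
// followed by the elements, so the supplied buffer must be length-prefixed:
//
//   int dims_data[] = {2, 1, 3};  // size 2, then dims {1, 3}
//   TfLiteIntArray* dims = IntArrayFromInts(dims_data);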
// Create a TfLiteFloatArray from an array of floats.  The first element in
// the supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
  static_assert(sizeof(float) == sizeof(int),
                "assumes sizeof(float) == sizeof(int) to perform casting");
  int size = static_cast<int>(floats[0]);
  *reinterpret_cast<int32_t*>(const_cast<float*>(floats)) = size;
  return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
}

TfLiteTensor CreateTensor(TfLiteIntArray* dims, const char* name,
                          bool is_variable) {
  TfLiteTensor result;
  result.dims = dims;
  result.name = name;
  result.params = {};
  result.quantization = {kTfLiteNoQuantization, nullptr};
  result.is_variable = is_variable;
  result.allocation_type = kTfLiteMemNone;
  result.allocation = nullptr;
  return result;
}

TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
                               const char* name, bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteFloat32;
  result.data.f = const_cast<float*>(data);
  result.bytes = ElementCount(*dims) * sizeof(float);
  return result;
}

void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end) {
  float* p = begin;
  float* v = tensor->data.f;
  while (p != end) {
    *v++ = *p++;
  }
}

TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
                              const char* name, bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteBool;
  result.data.b = const_cast<bool*>(data);
  result.bytes = ElementCount(*dims) * sizeof(bool);
  return result;
}

TfLiteTensor CreateInt32Tensor(const int32_t* data, TfLiteIntArray* dims,
                               const char* name, bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteInt32;
  result.data.i32 = const_cast<int32_t*>(data);
  result.bytes = ElementCount(*dims) * sizeof(int32_t);
  return result;
}

TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   const char* name, bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteUInt8;
  result.data.uint8 = const_cast<uint8_t*>(data);
  result.params = {scale, zero_point};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  result.bytes = ElementCount(*dims) * sizeof(uint8_t);
  return result;
}
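// Usage sketch (illustrative; not part of the original file):
//
//   int dims_data[] = {2, 1, 3};
//   float values[3] = {1.0f, 2.0f, 3.0f};
//   TfLiteTensor t = CreateFloatTensor(
//       values, IntArrayFromInts(dims_data), "test_float", false);
//
// These Create*Tensor helpers wrap the supplied buffer rather than copying
// it, so the caller must keep the backing storage alive for the lifetime of
// the returned TfLiteTensor.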
// Creates a quantized tensor containing a quantized version of the supplied
// buffer.
TfLiteTensor CreateQuantizedTensor(const float* input, uint8_t* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, const char* name,
                                   bool is_variable) {
  int input_size = ElementCount(*dims);
  tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
  return CreateQuantizedTensor(quantized, dims, scale, zero_point, name);
}

TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   const char* name, bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteInt8;
  result.data.int8 = const_cast<int8_t*>(data);
  result.params = {scale, zero_point};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  result.bytes = ElementCount(*dims) * sizeof(int8_t);
  return result;
}

TfLiteTensor CreateQuantizedTensor(const float* input, int8_t* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, const char* name,
                                   bool is_variable) {
  int input_size = ElementCount(*dims);
  tflite::AsymmetricQuantize(input, quantized, input_size, scale, zero_point);
  return CreateQuantizedTensor(quantized, dims, scale, zero_point, name);
}

TfLiteTensor CreateQuantized32Tensor(const int32_t* data, TfLiteIntArray* dims,
                                     float scale, const char* name,
                                     bool is_variable) {
  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteInt32;
  result.data.i32 = const_cast<int32_t*>(data);
  // Quantized int32 tensors always have a zero point of 0, since the range of
  // int32 values is large, and because zero point costs extra cycles during
  // processing.
  result.params = {scale, 0};
  result.quantization = {kTfLiteAffineQuantization, nullptr};
  result.bytes = ElementCount(*dims) * sizeof(int32_t);
  return result;
}

TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, const char* name,
                                       bool is_variable) {
  float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
  return CreateQuantized32Tensor(quantized, dims, bias_scale, name,
                                 is_variable);
}
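// Worked example (illustrative; not part of the original file): with
// input_scale = 0.5f and weights_scale = 0.25f, the bias above is quantized
// with bias_scale = 0.5f * 0.25f = 0.125f, matching the scale of the int32
// accumulator that holds sums of input * weight products.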
// Quantizes an int32 bias tensor with per-channel scales, each determined by
// the input scale multiplied by that channel's weight scale.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    const char* name, bool is_variable) {
  int input_size = ElementCount(*dims);
  int num_channels = dims->data[quantized_dimension];
  // The first element is reserved for the array length.
  zero_points[0] = num_channels;
  scales[0] = static_cast<float>(num_channels);
  float* scales_array = &scales[1];
  for (int i = 0; i < num_channels; i++) {
    scales_array[i] = input_scale * weight_scales[i];
    zero_points[i + 1] = 0;
  }

  SymmetricPerChannelQuantize(input, quantized, input_size, num_channels,
                              scales_array);

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteInt32;
  result.data.i32 = const_cast<int32_t*>(quantized);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  result.bytes = ElementCount(*dims) * sizeof(int32_t);
  return result;
}

TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, const char* name, bool is_variable) {
  int channel_count = dims->data[quantized_dimension];
  scales[0] = static_cast<float>(channel_count);
  zero_points[0] = channel_count;

  SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension,
                                    quantized, &scales[1]);

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor result = CreateTensor(dims, name, is_variable);
  result.type = kTfLiteInt8;
  result.data.int8 = const_cast<int8_t*>(quantized);
  result.quantization = {kTfLiteAffineQuantization, affine_quant};
  result.bytes = ElementCount(*dims) * sizeof(int8_t);
  return result;
}

}  // namespace testing
}  // namespace tflite
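A minimal end-to-end sketch of how a kernel test might combine these helpers. The variable names (dims_data, input_values, quantized) are invented for illustration; the signatures come from the listing above, and is_variable is passed explicitly rather than relying on any default argument.

    int dims_data[] = {2, 1, 3};  // rank 2, shape {1, 3}
    TfLiteIntArray* dims = tflite::testing::IntArrayFromInts(dims_data);

    float input_values[] = {-1.0f, 0.0f, 1.0f};
    uint8_t quantized[3];
    // Quantizes input_values with scale 0.5 and zero point 128, then wraps
    // the result in a TfLiteTensor for the kernel under test.
    TfLiteTensor input = tflite::testing::CreateQuantizedTensor(
        input_values, quantized, dims, /*scale=*/0.5f, /*zero_point=*/128,
        "input_tensor", /*is_variable=*/false);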
