Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are due to be permanently deleted in July 2026.
To keep a copy of this software download the repository Zip archive or clone locally using Mercurial.
It is also possible to export all your personal repositories from the account settings page.
test_helpers.h
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_

// Useful functions for writing tests.

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {
namespace testing {

// Returns an example flatbuffer TensorFlow Lite model.
const Model* GetMockModel();

// Builds a one-dimensional flatbuffer tensor of the given size.
const Tensor* Create1dFlatbufferTensor(int size);

// Creates a one-dimensional tensor with no quantization metadata.
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size);

// Creates a vector of flatbuffer buffers.
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers();

// Performs a simple string comparison without requiring standard C library.
int TestStrcmp(const char* a, const char* b);

// Wrapper to forward kernel errors to the interpreter's error reporter.
// Uses a printf-style `format` string followed by its arguments.
void ReportOpError(struct TfLiteContext* context, const char* format, ...);

// Initializes `context` for use in tests with the `tensors_size` entries of
// the `tensors` array. (Exact fields set are defined by the implementation,
// which is not visible in this header.)
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     TfLiteContext* context);

// Create a TfLiteIntArray from an array of ints. The first element in the
// supplied array must be the size of the array expressed as an int.
TfLiteIntArray* IntArrayFromInts(const int* int_array);

// Create a TfLiteFloatArray from an array of floats. The first element in the
// supplied array must be the size of the array expressed as a float.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);

// Creates a float32 TfLiteTensor named `name` over the given `data` buffer
// with shape `dims`. `is_variable` marks the tensor as a variable tensor.
TfLiteTensor CreateFloatTensor(const float* data, TfLiteIntArray* dims,
                               const char* name, bool is_variable = false);

// Fills `tensor`'s float data from the half-open range [begin, end).
void PopulateFloatTensor(TfLiteTensor* tensor, float* begin, float* end);

// Creates a boolean TfLiteTensor over `data` with shape `dims`.
TfLiteTensor CreateBoolTensor(const bool* data, TfLiteIntArray* dims,
                              const char* name, bool is_variable = false);

// Creates an int32 TfLiteTensor over the given data buffer with shape `dims`.
TfLiteTensor CreateInt32Tensor(const int32_t*, TfLiteIntArray* dims,
                               const char* name, bool is_variable = false);

// Creates a uint8 quantized tensor from already-quantized `data`, recording
// the given `scale` and `zero_point` quantization parameters.
TfLiteTensor CreateQuantizedTensor(const uint8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   const char* name, bool is_variable = false);

// Creates a uint8 quantized tensor from float `input`, writing the quantized
// values into the caller-provided `quantized` buffer.
TfLiteTensor CreateQuantizedTensor(const float* input, uint8_t* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, const char* name,
                                   bool is_variable = false);

// Creates an int8 quantized tensor from already-quantized `data`, recording
// the given `scale` and `zero_point` quantization parameters.
TfLiteTensor CreateQuantizedTensor(const int8_t* data, TfLiteIntArray* dims,
                                   float scale, int zero_point,
                                   const char* name, bool is_variable = false);

// Creates an int8 quantized tensor from float `input`, writing the quantized
// values into the caller-provided `quantized` buffer.
TfLiteTensor CreateQuantizedTensor(const float* input, int8_t* quantized,
                                   TfLiteIntArray* dims, float scale,
                                   int zero_point, const char* name,
                                   bool is_variable = false);

// Creates an int32 quantized bias tensor from float `data`, writing the
// quantized values into `quantized`. The bias scale is presumably
// input_scale * weights_scale, mirroring the per-channel variant below —
// confirm against the implementation.
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, const char* name,
                                       bool is_variable = false);

// Quantizes int32 bias tensor with per-channel weights determined by input
// scale multiplied by weight scale for each channel.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    const char* name, bool is_variable = false);

// Creates an int8 tensor from float `input` using symmetric per-channel
// quantization along `quantized_dimension`; per-channel `scales` and
// `zero_points` are stored in the caller-provided `affine_quant` struct.
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, const char* name, bool is_variable = false);

}  // namespace testing
}  // namespace tflite

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TEST_HELPERS_H_
Generated on Wed Jul 13 2022 16:03:36 by Doxygen 1.7.2