Daniel Konegen / MNIST_example

Dependencies:   mbed-os

Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers test_utils.h Source File

test_utils.h

00001 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
00002 
00003 Licensed under the Apache License, Version 2.0 (the "License");
00004 you may not use this file except in compliance with the License.
00005 You may obtain a copy of the License at
00006 
00007     http://www.apache.org/licenses/LICENSE-2.0
00008 
00009 Unless required by applicable law or agreed to in writing, software
00010 distributed under the License is distributed on an "AS IS" BASIS,
00011 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00012 See the License for the specific language governing permissions and
00013 limitations under the License.
00014 ==============================================================================*/
00015 #ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TESTING_TEST_UTILS_H_
00016 #define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TESTING_TEST_UTILS_H_
00017 
00018 #include <cmath>
00019 #include <cstdint>
00020 #include <initializer_list>
00021 #include <limits>
00022 
00023 #include "tensorflow/lite/c/c_api_internal.h"
00024 #include "tensorflow/lite/core/api/tensor_utils.h"
00025 #include "tensorflow/lite/experimental/micro/micro_utils.h"
00026 #include "tensorflow/lite/experimental/micro/test_helpers.h"
00027 #include "tensorflow/lite/experimental/micro/testing/micro_test.h"
00028 
00029 namespace tflite {
00030 namespace testing {
00031 
00032 // Note: These methods are deprecated, do not use.  See b/141332970.
00033 
00034 // TODO(kreeger): Don't use this anymore in our tests. Optimized compiler
00035 // settings can play with pointer placement on the stack (b/140130236).
00036 inline TfLiteIntArray* IntArrayFromInitializer(
00037     std::initializer_list<int> int_initializer) {
00038   return IntArrayFromInts(int_initializer.begin());
00039 }
00040 
// Derives the quantization range max from scaling factor and zero point:
// dequantizes the largest representable quantized value of T.
template <typename T>
inline float MaxFromZeroPointScale(const int zero_point, const float scale) {
  const int max_quantized = std::numeric_limits<T>::max();
  return (max_quantized - zero_point) * scale;
}
00046 
// Derives the quantization range min from scaling factor and zero point:
// dequantizes the smallest representable quantized value of T.
template <typename T>
inline float MinFromZeroPointScale(const int zero_point, const float scale) {
  const int min_quantized = std::numeric_limits<T>::min();
  return (min_quantized - zero_point) * scale;
}
00052 
// Derives the quantization scaling factor from a min and max range:
// float span divided by the full span of the quantized type T.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  // Compute the quantized span in double so wide types (e.g. int32) do not
  // overflow integer arithmetic, matching the original `* 1.0` promotion.
  const double quantized_span =
      static_cast<double>(std::numeric_limits<T>::max()) -
      static_cast<double>(std::numeric_limits<T>::min());
  return (max - min) / quantized_span;
}
00059 
00060 // Derives the quantization zero point from a min and max range.
00061 template <typename T>
00062 inline int ZeroPointFromMinMax(const float min, const float max) {
00063   return static_cast<int>(std::numeric_limits<T>::min()) +
00064          static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f);
00065 }
00066 
00067 // Converts a float value into an unsigned eight-bit quantized value.
00068 inline uint8_t F2Q(const float value, const float min, const float max) {
00069   int32_t result = ZeroPointFromMinMax<uint8_t>(min, max) +
00070                    (value / ScaleFromMinMax<uint8_t>(min, max)) + 0.5f;
00071   if (result < std::numeric_limits<uint8_t>::min()) {
00072     result = std::numeric_limits<uint8_t>::min();
00073   }
00074   if (result > std::numeric_limits<uint8_t>::max()) {
00075     result = std::numeric_limits<uint8_t>::max();
00076   }
00077   return result;
00078 }
00079 
00080 // Converts a float value into a signed eight-bit quantized value.
00081 inline int8_t F2QS(const float value, const float min, const float max) {
00082   return F2Q(value, min, max) + std::numeric_limits<int8_t>::min();
00083 }
00084 
// Converts a float value into a signed thirty-two-bit quantized value.  Note
// that values close to max int and min int may see significant error due to
// a lack of floating point granularity for large values.
//
// The result saturates to the int32 range; the final conversion truncates
// toward zero (no rounding), preserving the historical behavior.
inline int32_t F2Q32(const float value, const float scale) {
  // Work in double so the int32 saturation bounds are represented exactly.
  double quantized = value / scale;
  if (quantized > std::numeric_limits<int32_t>::max()) {
    quantized = std::numeric_limits<int32_t>::max();
  } else if (quantized < std::numeric_limits<int32_t>::min()) {
    quantized = std::numeric_limits<int32_t>::min();
  }
  // Bug fix: cast to int32_t, not int.  On targets where `int` is 16 bits
  // (common on microcontrollers this header targets), casting through `int`
  // truncated the saturated value before widening to the int32_t return.
  return static_cast<int32_t>(quantized);
}
00097 
// TODO(b/141330728): Move this method elsewhere as part clean up.
// Fills in a TfLiteContext for micro kernel tests: installs `tensors`
// (length `tensors_size`) as the context's tensor array, stashes the shared
// micro_test error reporter in the opaque impl_ slot (ReportOpError is
// installed alongside it and presumably reads it back out — verify against
// ReportOpError's definition), nulls every interpreter callback these tests
// are not expected to exercise, and resets any variable tensors.
inline void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                            TfLiteContext* context) {
  context->tensors_size = tensors_size;
  context->tensors = tensors;
  // Error reporter rides along in the context's opaque impl_ pointer.
  context->impl_ = static_cast<void*>(micro_test::reporter);
  context->GetExecutionPlan = nullptr;
  context->ResizeTensor = nullptr;
  context->ReportError = ReportOpError;
  context->AddTensors = nullptr;
  context->GetNodeAndRegistration = nullptr;
  context->ReplaceNodeSubsetsWithDelegateKernels = nullptr;
  context->recommended_num_threads = 1;
  context->GetExternalContext = nullptr;
  context->SetExternalContext = nullptr;

  // Put variable tensors into a known initial state before each test
  // (via ResetVariableTensor — presumably zero-fills; confirm in
  // tensorflow/lite/core/api/tensor_utils.h).
  for (int i = 0; i < tensors_size; ++i) {
    if (context->tensors[i].is_variable) {
      ResetVariableTensor(&context->tensors[i]);
    }
  }
}
00120 inline TfLiteTensor CreateFloatTensor(std::initializer_list<float> data,
00121                                       TfLiteIntArray* dims, const char* name,
00122                                       bool is_variable = false) {
00123   return CreateFloatTensor(data.begin(), dims, name, is_variable);
00124 }
00125 
00126 inline TfLiteTensor CreateBoolTensor(std::initializer_list<bool> data,
00127                                      TfLiteIntArray* dims, const char* name,
00128                                      bool is_variable = false) {
00129   return CreateBoolTensor(data.begin(), dims, name, is_variable);
00130 }
00131 
00132 inline TfLiteTensor CreateQuantizedTensor(const uint8_t* data,
00133                                           TfLiteIntArray* dims,
00134                                           const char* name, float min,
00135                                           float max, bool is_variable = false) {
00136   TfLiteTensor result;
00137   result.type = kTfLiteUInt8;
00138   result.data.uint8 = const_cast<uint8_t*>(data);
00139   result.dims = dims;
00140   result.params = {ScaleFromMinMax<uint8_t>(min, max),
00141                    ZeroPointFromMinMax<uint8_t>(min, max)};
00142   result.allocation_type = kTfLiteMemNone;
00143   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
00144   result.allocation = nullptr;
00145   result.name = name;
00146   result.is_variable = false;
00147   return result;
00148 }
00149 
00150 inline TfLiteTensor CreateQuantizedTensor(std::initializer_list<uint8_t> data,
00151                                           TfLiteIntArray* dims,
00152                                           const char* name, float min,
00153                                           float max, bool is_variable = false) {
00154   return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
00155 }
00156 
00157 inline TfLiteTensor CreateQuantizedTensor(const int8_t* data,
00158                                           TfLiteIntArray* dims,
00159                                           const char* name, float min,
00160                                           float max, bool is_variable = false) {
00161   TfLiteTensor result;
00162   result.type = kTfLiteInt8;
00163   result.data.int8 = const_cast<int8_t*>(data);
00164   result.dims = dims;
00165   result.params = {ScaleFromMinMax<int8_t>(min, max),
00166                    ZeroPointFromMinMax<int8_t>(min, max)};
00167   result.allocation_type = kTfLiteMemNone;
00168   result.bytes = ElementCount(*dims) * sizeof(int8_t);
00169   result.allocation = nullptr;
00170   result.name = name;
00171   result.is_variable = is_variable;
00172   return result;
00173 }
00174 
00175 inline TfLiteTensor CreateQuantizedTensor(std::initializer_list<int8_t> data,
00176                                           TfLiteIntArray* dims,
00177                                           const char* name, float min,
00178                                           float max, bool is_variable = false) {
00179   return CreateQuantizedTensor(data.begin(), dims, name, min, max, is_variable);
00180 }
00181 
00182 inline TfLiteTensor CreateQuantizedTensor(float* data, uint8_t* quantized_data,
00183                                           TfLiteIntArray* dims,
00184                                           const char* name,
00185                                           bool is_variable = false) {
00186   TfLiteTensor result;
00187   SymmetricQuantize(data, dims, quantized_data, &result.params.scale);
00188   result.data.uint8 = quantized_data;
00189   result.type = kTfLiteUInt8;
00190   result.dims = dims;
00191   result.params.zero_point = 128;
00192   result.allocation_type = kTfLiteMemNone;
00193   result.bytes = ElementCount(*dims) * sizeof(uint8_t);
00194   result.allocation = nullptr;
00195   result.name = name;
00196   result.is_variable = is_variable;
00197   return result;
00198 }
00199 
00200 inline TfLiteTensor CreateQuantizedTensor(float* data, int8_t* quantized_data,
00201                                           TfLiteIntArray* dims,
00202                                           const char* name,
00203                                           bool is_variable = false) {
00204   TfLiteTensor result;
00205   SignedSymmetricQuantize(data, dims, quantized_data, &result.params.scale);
00206   result.data.int8 = quantized_data;
00207   result.type = kTfLiteInt8;
00208   result.dims = dims;
00209   result.params.zero_point = 0;
00210   result.allocation_type = kTfLiteMemNone;
00211   result.bytes = ElementCount(*dims) * sizeof(int8_t);
00212   result.allocation = nullptr;
00213   result.name = name;
00214   result.is_variable = is_variable;
00215   return result;
00216 }
00217 
00218 inline TfLiteTensor CreateQuantized32Tensor(const int32_t* data,
00219                                             TfLiteIntArray* dims,
00220                                             const char* name, float scale,
00221                                             bool is_variable = false) {
00222   TfLiteTensor result;
00223   result.type = kTfLiteInt32;
00224   result.data.i32 = const_cast<int32_t*>(data);
00225   result.dims = dims;
00226   // Quantized int32 tensors always have a zero point of 0, since the range of
00227   // int32 values is large, and because zero point costs extra cycles during
00228   // processing.
00229   result.params = {scale, 0};
00230   result.allocation_type = kTfLiteMemNone;
00231   result.bytes = ElementCount(*dims) * sizeof(int32_t);
00232   result.allocation = nullptr;
00233   result.name = name;
00234   result.is_variable = is_variable;
00235   return result;
00236 }
00237 
00238 inline TfLiteTensor CreateQuantized32Tensor(std::initializer_list<int32_t> data,
00239                                             TfLiteIntArray* dims,
00240                                             const char* name, float scale,
00241                                             bool is_variable = false) {
00242   return CreateQuantized32Tensor(data.begin(), dims, name, scale, is_variable);
00243 }
00244 
00245 template <typename input_type = int32_t,
00246           TfLiteType tensor_input_type = kTfLiteInt32>
00247 inline TfLiteTensor CreateTensor(const input_type* data, TfLiteIntArray* dims,
00248                                  const char* name, bool is_variable = false) {
00249   TfLiteTensor result;
00250   result.type = tensor_input_type;
00251   result.data.raw = reinterpret_cast<char*>(const_cast<input_type*>(data));
00252   result.dims = dims;
00253   result.allocation_type = kTfLiteMemNone;
00254   result.bytes = ElementCount(*dims) * sizeof(input_type);
00255   result.allocation = nullptr;
00256   result.name = name;
00257   result.is_variable = is_variable;
00258   return result;
00259 }
00260 
00261 template <typename input_type = int32_t,
00262           TfLiteType tensor_input_type = kTfLiteInt32>
00263 inline TfLiteTensor CreateTensor(std::initializer_list<input_type> data,
00264                                  TfLiteIntArray* dims, const char* name,
00265                                  bool is_variable = false) {
00266   return CreateTensor<input_type, tensor_input_type>(data.begin(), dims, name,
00267                                                      is_variable);
00268 }
00269 
00270 }  // namespace testing
00271 }  // namespace tflite
00272 
00273 #endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_TESTING_TEST_UTILS_H_