Daniel Konegen / MNIST_example

Dependencies: mbed-os


kernel_util.h

/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_

#include <algorithm>
#include <limits>

#include "flatbuffers/flatbuffers.h"
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"

namespace tflite {

inline int NumDimensions(const TfLiteTensor* t) { return t->dims->size; }
inline int SizeOfDimension(const TfLiteTensor* t, int dim) {
  return t->dims->data[dim];
}
inline const TfLiteTensor* GetInput(TfLiteContext* context,
                                    const TfLiteNode* node, int index) {
  return &context
              ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
}
inline TfLiteTensor* GetVariableInput(TfLiteContext* context,
                                      const TfLiteNode* node, int index) {
  TfLiteTensor* tensor =
      &context->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
  return (tensor->is_variable) ? tensor : nullptr;
}
inline TfLiteTensor* GetOutput(TfLiteContext* context, const TfLiteNode* node,
                               int index) {
  return &context
              ->tensors[flatbuffers::EndianScalar(node->outputs->data[index])];
}
inline TfLiteTensor* GetTemporary(TfLiteContext* context,
                                  const TfLiteNode* node, int index) {
  return &context->tensors[flatbuffers::EndianScalar(
      node->temporaries->data[index])];
}
inline const TfLiteTensor* GetIntermediates(TfLiteContext* context,
                                            const TfLiteNode* node, int index) {
  return &context->tensors[node->intermediates->data[index]];
}
inline int NumInputs(const TfLiteNode* node) { return node->inputs->size; }
inline int NumOutputs(const TfLiteNode* node) { return node->outputs->size; }
inline int NumIntermediates(const TfLiteNode* node) {
  return node->intermediates->size;
}
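
// Usage sketch (illustrative only; the tensor index 0 and the 4-D check are
// op-specific placeholders): inside a kernel's Prepare or Eval, the accessors
// above are typically combined like this:
//
//   const TfLiteTensor* input = GetInput(context, node, /*index=*/0);
//   TfLiteTensor* output = GetOutput(context, node, /*index=*/0);
//   if (NumInputs(node) != 1 || NumDimensions(input) != 4) {
//     return kTfLiteError;
//   }
//   const int batches = SizeOfDimension(input, 0);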

inline int64_t NumElements(const TfLiteIntArray* dims) {
  int64_t count = 1;
  for (int i = 0; i < dims->size; ++i) {
    count *= dims->data[i];
  }
  return count;
}

inline int64_t NumElements(const TfLiteTensor* t) {
  return NumElements(t->dims);
}
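
// Worked example: for dims {2, 3, 4}, NumElements returns 2 * 3 * 4 = 24.
// For a 0-d scalar tensor the loop body never runs, so the count is 1.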

inline const TfLiteTensor* GetOptionalInputTensor(TfLiteContext* context,
                                                  const TfLiteNode* node,
                                                  int index) {
  const bool use_tensor = node->inputs->data[index] != kOptionalTensor;
  if (use_tensor) {
    return &context
                ->tensors[flatbuffers::EndianScalar(node->inputs->data[index])];
  }
  return nullptr;
}

// Determines whether the tensor is constant.
inline bool IsConstantTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteMmapRo;
}

// Determines whether the tensor is dynamic. Note that a tensor can be
// non-constant without being dynamic; this function specifically checks for
// a dynamic tensor.
inline bool IsDynamicTensor(const TfLiteTensor* tensor) {
  return tensor->allocation_type == kTfLiteDynamic;
}

// Sets the tensor to dynamic.
inline void SetTensorToDynamic(TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic) {
    tensor->allocation_type = kTfLiteDynamic;
    tensor->data.raw = nullptr;
  }
}
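
// Typical pattern (a sketch; `output`, `rows`, and `cols` stand in for
// whatever the op computes): a kernel whose output shape is data-dependent
// marks the output dynamic in Prepare, then resizes it in Eval before
// writing any data. ResizeTensor takes ownership of the new shape array.
//
//   // In Prepare:
//   SetTensorToDynamic(GetOutput(context, node, 0));
//
//   // In Eval:
//   TfLiteIntArray* new_size = TfLiteIntArrayCreate(2);
//   new_size->data[0] = rows;
//   new_size->data[1] = cols;
//   TF_LITE_ENSURE_OK(context,
//                     context->ResizeTensor(context, output, new_size));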

// Determines whether it is a hybrid op - one that has float inputs and
// quantized weights.
inline bool IsHybridOp(const TfLiteTensor* input, const TfLiteTensor* weight) {
  return ((weight->type == kTfLiteUInt8 || weight->type == kTfLiteInt8) &&
          input->type == kTfLiteFloat32);
}
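
// For example, a fully connected op with a float32 input and int8 weights is
// hybrid: the kernel dequantizes the weights (or quantizes the input) on the
// fly. A float32 input with float32 weights, or an int8 input with int8
// weights, is not hybrid.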

// Checks dimensionality match and populates OpData for Conv and DepthwiseConv.
TfLiteStatus PopulateConvolutionQuantizationParams(
    TfLiteContext* context, const TfLiteTensor* input,
    const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output,
    const TfLiteFusedActivation& activation, int32_t* multiplier, int* shift,
    int32_t* output_activation_min, int32_t* output_activation_max,
    int32_t* per_channel_multiplier, int* per_channel_shift);

// Calculates the multiplication factor for a quantized convolution (or
// quantized depthwise convolution) involving the given tensors. Returns an
// error if the scales of the tensors are not compatible.
TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              const TfLiteTensor* bias,
                                              TfLiteTensor* output,
                                              double* multiplier);

TfLiteStatus GetQuantizedConvolutionMultipler(TfLiteContext* context,
                                              const TfLiteTensor* input,
                                              const TfLiteTensor* filter,
                                              TfLiteTensor* output,
                                              double* multiplier);
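
// A note on the value produced above: for quantized convolution the real
// rescale factor is input_scale * filter_scale / output_scale, and callers
// typically convert the returned double into a fixed-point multiplier and
// shift (e.g. via QuantizeMultiplier() from quantization_util.h) before use.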

// Calculates the useful quantized range of an activation layer given its
// activation tensor.
TfLiteStatus CalculateActivationRangeQuantized(TfLiteContext* context,
                                               TfLiteFusedActivation activation,
                                               TfLiteTensor* output,
                                               int32_t* act_min,
                                               int32_t* act_max);
void CalculateActivationRangeUint8(TfLiteFusedActivation activation,
                                   TfLiteTensor* output, int32_t* act_min,
                                   int32_t* act_max);
void CalculateActivationRangeInt8(TfLiteFusedActivation activation,
                                  TfLiteTensor* output, int32_t* act_min,
                                  int32_t* act_max);
// Calculates the useful range of an activation layer given its activation
// tensor.
template <typename T>
void CalculateActivationRange(TfLiteFusedActivation activation,
                              T* activation_min, T* activation_max) {
  if (activation == kTfLiteActRelu) {
    *activation_min = 0;
    *activation_max = std::numeric_limits<T>::max();
  } else if (activation == kTfLiteActRelu6) {
    *activation_min = 0;
    *activation_max = 6;
  } else if (activation == kTfLiteActRelu1) {
    *activation_min = -1;
    *activation_max = 1;
  } else {
    *activation_min = std::numeric_limits<T>::lowest();
    *activation_max = std::numeric_limits<T>::max();
  }
}
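
// Example (illustrative; `value` is a placeholder accumulator): clamping a
// float result after a fused ReLU6 activation.
//
//   float act_min, act_max;
//   CalculateActivationRange(kTfLiteActRelu6, &act_min, &act_max);
//   // act_min == 0.0f, act_max == 6.0f
//   value = std::min(std::max(value, act_min), act_max);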

// Returns true if the given tensors have the same shape.
bool HaveSameShapes(const TfLiteTensor* input1, const TfLiteTensor* input2);

// Calculates the output_shape that is necessary for element-wise operations
// with broadcasting involving the two input tensors.
TfLiteStatus CalculateShapeForBroadcast(TfLiteContext* context,
                                        const TfLiteTensor* input1,
                                        const TfLiteTensor* input2,
                                        TfLiteIntArray** output_shape);
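
// Broadcasting example: under NumPy-style broadcasting rules, input shapes
// {8, 1, 6, 1} and {7, 1, 5} yield an output shape of {8, 7, 6, 5}, while
// incompatible shapes such as {2, 3} and {4} produce an error status instead.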
}  // namespace tflite

#endif  // TENSORFLOW_LITE_KERNELS_KERNEL_UTIL_H_