Daniel Konegen / MNIST_example

Dependencies:   mbed-os

Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers softmax.cc Source File

softmax.cc

00001 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
00002 
00003 Licensed under the Apache License, Version 2.0 (the "License");
00004 you may not use this file except in compliance with the License.
00005 You may obtain a copy of the License at
00006 
00007     http://www.apache.org/licenses/LICENSE-2.0
00008 
00009 Unless required by applicable law or agreed to in writing, software
00010 distributed under the License is distributed on an "AS IS" BASIS,
00011 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00012 See the License for the specific language governing permissions and
00013 limitations under the License.
00014 ==============================================================================*/
00015 
00016 #include "tensorflow/lite/kernels/internal/reference/softmax.h"
00017 #include "tensorflow/lite/c/builtin_op_data.h"
00018 #include "tensorflow/lite/c/c_api_internal.h"
00019 #include "tensorflow/lite/kernels/internal/common.h"
00020 #include "tensorflow/lite/kernels/internal/quantization_util.h"
00021 #include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
00022 #include "tensorflow/lite/kernels/kernel_util.h"
00023 #include "tensorflow/lite/kernels/op_macros.h"
00024 
00025 namespace tflite {
00026 namespace ops {
00027 namespace micro {
00028 namespace activations {
00029 namespace {
00030 
// Quantization parameters for the uint8 softmax path, computed once per
// invocation in CalculateSoftmaxOpData and consumed by the *Quantized kernels.
struct OpData {
  int32_t input_multiplier = 0;    // fixed-point multiplier for input scaling
  int input_left_shift = 0;        // left shift paired with input_multiplier
  int32_t input_range_radius = 0;  // NOTE(review): never written in this file — verify it is needed
  int diff_min = 0;                // smallest (input - max) diff that still contributes
};
00037 
00038 TfLiteStatus CalculateSoftmaxOpData(TfLiteContext* context,
00039                                     const TfLiteTensor* input,
00040                                     TfLiteTensor* output,
00041                                     const TfLiteSoftmaxParams* params,
00042                                     OpData* data) {
00043   if (input->type == kTfLiteUInt8) {
00044     TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0);
00045     TF_LITE_ENSURE(context, output->params.scale == 1.f / 256);
00046 
00047     static const int kScaledDiffIntegerBits = 5;
00048 
00049     tflite::PreprocessSoftmaxScaling(
00050         params->beta, input->params.scale, kScaledDiffIntegerBits,
00051         &data->input_multiplier, &data->input_left_shift);
00052     data->diff_min = -1.0 * tflite::CalculateInputRadius(
00053                                 kScaledDiffIntegerBits, data->input_left_shift);
00054   }
00055   return kTfLiteOk;
00056 }
00057 
00058 }  // namespace
00059 
// No persistent per-op state is allocated: OpData is rebuilt on the stack in
// SoftmaxEval, so Init has nothing to set up.
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}
00063 
// Init allocates nothing, so there is nothing to release here.
void Free(TfLiteContext* context, void* buffer) {}
00065 
// All validation and parameter computation is deferred to SoftmaxEval, so
// Prepare is a no-op.
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) {
  return kTfLiteOk;
}
00069 
00070 // Takes a 1D tensor and performs softmax along it.
00071 void Softmax1DFloat(const TfLiteTensor* input, TfLiteTensor* output,
00072                     TfLiteSoftmaxParams* params) {
00073   const int input_size = input->dims->data[0];
00074   tflite::reference_ops::Softmax(input->data.f, input_size, 1, params->beta,
00075                                  output->data.f);
00076 }
00077 
00078 // Takes a 2D tensor and perform softmax along the last dimension.
00079 void Softmax2DFloat(const TfLiteTensor* input, TfLiteTensor* output,
00080                     TfLiteSoftmaxParams* params) {
00081   const int batch_size = input->dims->data[0];
00082   const int input_size = input->dims->data[1];
00083   tflite::reference_ops::Softmax(input->data.f, input_size, batch_size,
00084                                  params->beta, output->data.f);
00085 }
00086 
00087 void Softmax1DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
00088                         TfLiteSoftmaxParams* params, OpData* data) {
00089   // TODO(ahentz): this is arguably a dirty trick. Since the implementation
00090   // always traverses the last dimension of a 4D tensor, we will pretend our 1D
00091   // tensor is 4D in a special way. We will convert a (Y) shape into a (1,
00092   // 1, 1, Y) shape.
00093   const int input_size = input->dims->data[0];
00094   const int32_t shape_data[4] = {1, 1, 1, input_size};
00095   RuntimeShape shape(4, shape_data);
00096   SoftmaxParams op_params;
00097   op_params.input_multiplier = data->input_multiplier;
00098   op_params.input_left_shift = data->input_left_shift;
00099   op_params.diff_min = data->diff_min;
00100   tflite::reference_ops::Softmax(op_params, shape,
00101                                  GetTensorData<uint8_t>(input), shape,
00102                                  GetTensorData<uint8_t>(output));
00103 }
00104 
00105 void Softmax2DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
00106                         TfLiteSoftmaxParams* params, OpData* data) {
00107   // TODO(ahentz): this is arguably a dirty trick. Since the implementation
00108   // always traverses the last dimension of a 4D tensor, we will pretend our 2D
00109   // tensor is 4D in a special way. We will convert a (X, Y) shape into a (X,
00110   // 1, 1, Y) shape.
00111   const int batch_size = input->dims->data[0];
00112   const int input_size = input->dims->data[1];
00113   const int32_t shape_data[4] = {batch_size, 1, 1, input_size};
00114   RuntimeShape shape(4, shape_data);
00115   SoftmaxParams op_params;
00116   op_params.input_multiplier = data->input_multiplier;
00117   op_params.input_left_shift = data->input_left_shift;
00118   op_params.diff_min = data->diff_min;
00119   tflite::reference_ops::Softmax(op_params, shape,
00120                                  GetTensorData<uint8_t>(input), shape,
00121                                  GetTensorData<uint8_t>(output));
00122 }
00123 
00124 // Takes a 4D tensor and perform softmax along the forth dimension.
00125 void Softmax4DFloat(const TfLiteTensor* input, TfLiteTensor* output,
00126                     TfLiteSoftmaxParams* params) {
00127   SoftmaxParams op_params;
00128   op_params.beta = params->beta;
00129   tflite::reference_ops::Softmax(
00130       op_params, GetTensorShape(input), GetTensorData<float>(input),
00131       GetTensorShape(output), GetTensorData<float>(output));
00132 }
00133 
00134 void Softmax4DQuantized(const TfLiteTensor* input, TfLiteTensor* output,
00135                         TfLiteSoftmaxParams* params, OpData* data) {
00136   SoftmaxParams op_params;
00137   op_params.input_multiplier = data->input_multiplier;
00138   op_params.input_left_shift = data->input_left_shift;
00139   op_params.diff_min = data->diff_min;
00140   tflite::reference_ops::Softmax(
00141       op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
00142       GetTensorShape(output), GetTensorData<uint8_t>(output));
00143 }
00144 
00145 TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) {
00146   auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data);
00147 
00148   const TfLiteTensor* input = GetInput(context, node, 0);
00149   TfLiteTensor* output = GetOutput(context, node, 0);
00150 
00151   OpData local_data_object;
00152   OpData* data = &local_data_object;
00153   TF_LITE_ENSURE_STATUS(
00154       CalculateSoftmaxOpData(context, input, output, params, data));
00155 
00156   // TODO(ahentz): consider an implementation that works for many (all?)
00157   // dimensions.
00158   switch (input->type) {
00159     case kTfLiteFloat32: {
00160       if (NumDimensions(input) == 1) {
00161         Softmax1DFloat(input, output, params);
00162         return kTfLiteOk;
00163       }
00164       if (NumDimensions(input) == 2) {
00165         Softmax2DFloat(input, output, params);
00166         return kTfLiteOk;
00167       }
00168       if (NumDimensions(input) == 4) {
00169         Softmax4DFloat(input, output, params);
00170         return kTfLiteOk;
00171       }
00172       context->ReportError(
00173           context, "Only 1D, 2D and 4D tensors supported currently, got %dD.",
00174           NumDimensions(input));
00175       return kTfLiteError;
00176     }
00177     case kTfLiteUInt8: {
00178       if (NumDimensions(input) == 1) {
00179         Softmax1DQuantized(input, output, params, data);
00180         return kTfLiteOk;
00181       }
00182       if (NumDimensions(input) == 2) {
00183         Softmax2DQuantized(input, output, params, data);
00184         return kTfLiteOk;
00185       }
00186       if (NumDimensions(input) == 4) {
00187         Softmax4DQuantized(input, output, params, data);
00188         return kTfLiteOk;
00189       }
00190       context->ReportError(
00191           context, "Only 2D and 4D tensors supported currently, got %dD.",
00192           NumDimensions(input));
00193       return kTfLiteError;
00194     }
00195     default:
00196       context->ReportError(
00197           context, "Only float32 and uint8_t supported currently, got %d.",
00198           input->type);
00199       return kTfLiteError;
00200   }
00201 }
00202 }  // namespace activations
00203 
// Returns the kernel registration for the micro SOFTMAX op. The registration
// is a function-local static, so the same instance is shared by all callers.
TfLiteRegistration* Register_SOFTMAX() {
  static TfLiteRegistration r = {activations::Init, activations::Free,
                                 activations::SoftmaxPrepare,
                                 activations::SoftmaxEval};
  return &r;
}
00210 
00211 }  // namespace micro
00212 }  // namespace ops
00213 }  // namespace tflite