Daniel Konegen / MNIST_example

Dependencies:   mbed-os

quantize.h Source File

quantize.h

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_

#include <algorithm>
#include <limits>

#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/round.h"
#include "tensorflow/lite/kernels/internal/types.h"

namespace tflite {

namespace reference_ops {

// Reference affine quantization: each float input x is mapped to
//   q = round(x / scale) + zero_point,
// then clamped to the representable range of the output type T
// (e.g. int8 or uint8).
template <typename T>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape, T* output_data) {
  const int32 zero_point = op_params.zero_point;
  const double scale = static_cast<double>(op_params.scale);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static constexpr int32 min_val = std::numeric_limits<T>::min();
  static constexpr int32 max_val = std::numeric_limits<T>::max();

  for (int i = 0; i < flat_size; i++) {
    const float val = input_data[i];
    int32 unclamped = static_cast<int32>(TfLiteRound(val / scale)) + zero_point;
    // Saturate to the output type's range before the narrowing store.
    int32 clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[i] = clamped;
  }
}

}  // namespace reference_ops

}  // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_QUANTIZE_H_
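
As a rough usage sketch (not part of the original listing), the reference kernel above can be exercised directly on the host. The include path follows the upstream TensorFlow Lite layout and may differ in this project tree; the scale, zero_point, and test values are illustrative assumptions rather than values taken from the MNIST model.

// Hypothetical stand-alone sketch: calls the reference AffineQuantize kernel
// directly. The include path matches the upstream TensorFlow Lite layout and
// may need adjusting for this mbed project.
#include <cstdint>
#include <cstdio>

#include "tensorflow/lite/kernels/internal/reference/quantize.h"

int main() {
  // Assumed example parameters: scale = 0.5 means one quantized step
  // represents 0.5 in float; zero_point = -128 maps float 0.0 to int8 -128.
  tflite::QuantizationParams params;
  params.scale = 0.5;
  params.zero_point = -128;

  const float input[4] = {0.0f, 0.5f, 1.0f, 200.0f};
  int8_t output[4] = {};

  const tflite::RuntimeShape shape({4});
  tflite::reference_ops::AffineQuantize(params, shape, input, shape, output);

  // Expected: -128, -127, -126, 127 (200.0f saturates to the int8 maximum).
  for (int i = 0; i < 4; ++i) {
    std::printf("%g -> %d\n", static_cast<double>(input[i]),
                static_cast<int>(output[i]));
  }
  return 0;
}

The last input, 200.0f, lands outside the range an int8 can represent with these parameters, so it saturates to 127; that is exactly the behaviour implemented by the std::min/std::max pair in the loop above.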