Daniel Konegen / MNIST_example

Dependencies:   mbed-os

Embed: (wiki syntax)

« Back to documentation index

Show/hide line numbers memory_helpers.cc Source File

memory_helpers.cc

00001 /* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
00002 
00003 Licensed under the Apache License, Version 2.0 (the "License");
00004 you may not use this file except in compliance with the License.
00005 You may obtain a copy of the License at
00006 
00007     http://www.apache.org/licenses/LICENSE-2.0
00008 
00009 Unless required by applicable law or agreed to in writing, software
00010 distributed under the License is distributed on an "AS IS" BASIS,
00011 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00012 See the License for the specific language governing permissions and
00013 limitations under the License.
00014 ==============================================================================*/
00015 
00016 #include "tensorflow/lite/experimental/micro/memory_helpers.h"
00017 
00018 #include <cstdint>
00019 
00020 #include "tensorflow/lite/core/api/flatbuffer_conversions.h"
00021 
00022 namespace tflite {
00023 
// Rounds `data` up to the nearest multiple of `alignment` (in bytes).
// A pointer that is already aligned is returned unchanged.  `alignment`
// must be non-zero.
uint8_t* AlignPointerUp(uint8_t* data, size_t alignment) {
  const std::uintptr_t value = reinterpret_cast<std::uintptr_t>(data);
  const std::uintptr_t remainder = value % alignment;
  const std::uintptr_t rounded_up =
      (remainder == 0) ? value : value + (alignment - remainder);
  return reinterpret_cast<uint8_t*>(rounded_up);
}
00030 
// Rounds `data` down to the nearest multiple of `alignment` (in bytes).
// A pointer that is already aligned is returned unchanged.  `alignment`
// must be non-zero.
uint8_t* AlignPointerDown(uint8_t* data, size_t alignment) {
  const std::uintptr_t value = reinterpret_cast<std::uintptr_t>(data);
  return reinterpret_cast<uint8_t*>(value - (value % alignment));
}
00037 
// Rounds `size` up to the nearest multiple of `alignment`.  Sizes that are
// already multiples of `alignment` (including zero) pass through unchanged.
// `alignment` must be non-zero.
size_t AlignSizeUp(size_t size, size_t alignment) {
  const size_t remainder = size % alignment;
  if (remainder == 0) {
    return size;
  }
  return size + (alignment - remainder);
}
00042 
00043 TfLiteStatus TfLiteTypeSizeOf(TfLiteType type, size_t* size,
00044                               ErrorReporter* reporter) {
00045   switch (type) {
00046     case kTfLiteFloat32:
00047       *size = sizeof(float);
00048       break;
00049     case kTfLiteInt16:
00050       *size = sizeof(int16_t);
00051       break;
00052     case kTfLiteInt32:
00053       *size = sizeof(int32_t);
00054       break;
00055     case kTfLiteUInt8:
00056       *size = sizeof(uint8_t);
00057       break;
00058     case kTfLiteInt8:
00059       *size = sizeof(int8_t);
00060       break;
00061     case kTfLiteInt64:
00062       *size = sizeof(int64_t);
00063       break;
00064     case kTfLiteBool:
00065       *size = sizeof(bool);
00066       break;
00067     case kTfLiteComplex64:
00068       *size = sizeof(float) * 2;
00069       break;
00070     default:
00071       reporter->Report("Type %s (%d) not is not supported",
00072                        TfLiteTypeGetName(type), type);
00073       return kTfLiteError;
00074   }
00075   return kTfLiteOk;
00076 }
00077 
00078 TfLiteStatus BytesRequiredForTensor(const tflite::Tensor& flatbuffer_tensor,
00079                                     size_t* bytes, size_t* type_size,
00080                                     ErrorReporter* error_reporter) {
00081   int element_count = 1;
00082   for (size_t n = 0; n < flatbuffer_tensor.shape()->Length(); ++n) {
00083     element_count *= flatbuffer_tensor.shape()->Get(n);
00084   }
00085 
00086   TfLiteType tf_lite_type;
00087   TF_LITE_ENSURE_STATUS(ConvertTensorType(flatbuffer_tensor.type(),
00088                                           &tf_lite_type, error_reporter));
00089   TF_LITE_ENSURE_STATUS(
00090       TfLiteTypeSizeOf(tf_lite_type, type_size, error_reporter));
00091   *bytes = element_count * (*type_size);
00092   return kTfLiteOk;
00093 }
00094 
00095 }  // namespace tflite