Daniel Konegen / MNIST_example

Dependencies:   mbed-os

Embed: (wiki syntax)

« Back to documentation index

flatbuffer_conversions.h — Source File listing

flatbuffer_conversions.h

00001 /* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
00002 
00003 Licensed under the Apache License, Version 2.0 (the "License");
00004 you may not use this file except in compliance with the License.
00005 You may obtain a copy of the License at
00006 
00007     http://www.apache.org/licenses/LICENSE-2.0
00008 
00009 Unless required by applicable law or agreed to in writing, software
00010 distributed under the License is distributed on an "AS IS" BASIS,
00011 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
00012 See the License for the specific language governing permissions and
00013 limitations under the License.
00014 ==============================================================================*/
00015 #ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
00016 #define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
00017 
00018 // These functions transform codes and data structures that are defined in the
00019 // flatbuffer serialization format into in-memory values that are used by the
00020 // runtime API and interpreter.
00021 
00022 #include "tensorflow/lite/c/c_api_internal.h"
00023 #include "tensorflow/lite/core/api/error_reporter.h"
00024 #include "tensorflow/lite/core/api/op_resolver.h"
00025 #include "tensorflow/lite/schema/schema_generated.h"
00026 
00027 namespace tflite {
00028 
// Interface class for builtin data allocations.
//
// Implementations supply the raw storage into which ParseOpData writes the
// parsed builtin-op parameter structs. Ownership of that storage is handed
// to the interpreter's C layer, which frees it without running destructors,
// so only trivially-destructible (POD) types may be allocated through
// AllocatePOD().
class BuiltinDataAllocator {
 public:
  // Returns a pointer to `size` bytes of storage. The allocation strategy
  // (heap, arena, ...) is up to the implementation; per the ParseOpData
  // contract, heap-backed implementations leave freeing to the caller.
  virtual void* Allocate(size_t size) = 0;

  // Releases storage previously obtained from Allocate().
  virtual void Deallocate(void* data) = 0;

  // Allocate a structure, but make sure it is a POD structure that doesn't
  // require constructors to run. The reason we do this, is that Interpreter's C
  // extension part will take ownership so destructors will not be run during
  // deallocation.
  //
  // NOTE: std::is_pod is deprecated since C++20. The standard defines POD as
  // trivial + standard-layout, so the check below is exactly equivalent while
  // remaining valid on newer toolchains.
  template <typename T>
  T* AllocatePOD() {
    static_assert(
        std::is_trivial<T>::value && std::is_standard_layout<T>::value,
        "Builtin data structure must be POD.");
    return static_cast<T*>(this->Allocate(sizeof(T)));
  }

  // Virtual destructor: this is an abstract base deleted through base
  // pointers by client code.
  virtual ~BuiltinDataAllocator() {}
};
00047 
00048 // Parse the appropriate data out of the op.
00049 //
00050 // This handles builtin data explicitly as there are flatbuffer schemas.
00051 // If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
00052 // calling function has to pass in an allocator object, and this allocator
00053 // will be called to reserve space for the output data. If the calling
00054 // function's allocator reserves memory on the heap, then it's the calling
00055 // function's responsibility to free it.
00056 // If it returns kTfLiteError, `builtin_data` will be `nullptr`.
// NOTE(review): `op_type` presumably selects which builtin-options union
// member of `op` is decoded, with failures reported through
// `error_reporter` — the implementation is not visible here; confirm there.
00057 TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
00058                          ErrorReporter* error_reporter,
00059                          BuiltinDataAllocator* allocator, void** builtin_data);
00060 
00061 // Converts the tensor data type used in the flat buffer to the representation
00062 // used by the runtime.
// On success the converted value is written through `type`.
// NOTE(review): on an unrecognized TensorType the error is presumably
// reported via `error_reporter` and a non-kTfLiteOk status returned — the
// implementation is not visible here; confirm against it.
00063 TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
00064                                ErrorReporter* error_reporter);
00065 
00066 }  // namespace tflite
00067 
00068 #endif  // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_