Important changes to repositories hosted on mbed.com
Mbed-hosted Mercurial repositories are deprecated and are scheduled for permanent deletion in July 2026.
To keep a copy of this software, download the repository's Zip archive or clone it locally using Mercurial.
It is also possible to export all of your personal repositories from the account settings page.
micro_interpreter.h
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_

#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/experimental/micro/micro_allocator.h"
#include "tensorflow/lite/experimental/micro/micro_optional_debug_tools.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

// Interpreter for running TensorFlow Lite models on microcontroller-class
// targets. Tensor memory is drawn from a caller-supplied arena handed to the
// constructor (see MicroAllocator member) rather than from the heap.
class MicroInterpreter {
 public:
  // The lifetime of the model, op resolver, tensor arena, and error reporter
  // must be at least as long as that of the interpreter object, since the
  // interpreter may need to access them at any time. This means that you should
  // usually create them with the same scope as each other, for example having
  // them all allocated on the stack as local variables through a top-level
  // function.
  // The interpreter doesn't do any deallocation of any of the pointed-to
  // objects, ownership remains with the caller.
  //
  // model:             flatbuffer-serialized model to execute.
  // op_resolver:       maps op codes in the model to kernel implementations.
  // tensor_arena:      caller-owned working memory for tensor allocation.
  // tensor_arena_size: size of tensor_arena in bytes.
  // error_reporter:    sink for diagnostic messages.
  MicroInterpreter(const Model* model, const OpResolver& op_resolver,
                   uint8_t* tensor_arena, size_t tensor_arena_size,
                   ErrorReporter* error_reporter);

  // Specify a particular tensor as pre-allocated. This means that this tensor
  // will internally point to the supplied buffer, and no new memory will be
  // provided. The buffer must live at least as long as the allocator, since
  // the buffer will be used every time an op is invoked which uses the
  // specified tensor. Most commonly this is useful when a platform-provided
  // DMA buffer is used as an input, and it is desirable to avoid unnecessarily
  // allocating a new buffer and copying from the DMA buffer. The user must
  // ensure the buffer is valid throughout each interpreter run, and is not
  // prematurely overwritten.
  TfLiteStatus RegisterPreallocatedInput(uint8_t* buffer, size_t input_index);

  // Run through the model and allocate all necessary input, output and
  // intermediate tensors except for those already provided via calls to
  // registerPreallocatedInput.
  TfLiteStatus AllocateTensors();

  // Executes the model graph for one inference. Presumably requires
  // AllocateTensors() to have succeeded first (see tensors_allocated_);
  // confirm against the implementation.
  TfLiteStatus Invoke();

  // Total number of tensors tracked by the interpreter's TfLiteContext.
  size_t tensors_size() const { return context_.tensors_size; }
  // Returns the tensor at the given flat index (implementation not visible
  // here; bounds behavior unverified).
  TfLiteTensor* tensor(size_t tensor_index);

  // Accessors for the subgraph's input tensors. `index` is a position in the
  // inputs() vector, not a flat tensor index.
  TfLiteTensor* input(size_t index);
  size_t inputs_size() const { return subgraph_->inputs()->Length(); }
  const flatbuffers::Vector<int32_t>* inputs() { return subgraph_->inputs(); }

  // Accessors for the subgraph's output tensors, mirroring the input
  // accessors above.
  TfLiteTensor* output(size_t index);
  size_t outputs_size() const { return subgraph_->outputs()->Length(); }
  const flatbuffers::Vector<int32_t>* outputs() { return subgraph_->outputs(); }

  // Status recorded during construction; check before using the interpreter.
  TfLiteStatus initialization_status() const { return initialization_status_; }

  // The caller-supplied error reporter passed to the constructor.
  ErrorReporter* error_reporter() { return error_reporter_; }

  // Number of operators (nodes) in the model's subgraph.
  size_t operators_size() const { return operators_->size(); }
  // Returns the node/registration pair for the given operator index
  // (pairTfLiteNodeAndRegistration is declared in
  // micro_optional_debug_tools.h).
  struct pairTfLiteNodeAndRegistration node_and_registration(int node_index);

 private:
  // Byte-order fix-ups for tensor data — presumably applied when host
  // endianness differs from the flatbuffer's serialized layout; TODO confirm
  // against the implementation.
  void CorrectTensorEndianness(TfLiteTensor* tensorCorr);

  template <class T>
  void CorrectTensorDataEndianness(T* data, int32_t size);

  NodeAndRegistration* node_and_registrations_;

  const Model* model_;              // Caller-owned model flatbuffer.
  const OpResolver& op_resolver_;   // Caller-owned op-code -> kernel mapping.
  ErrorReporter* error_reporter_;   // Caller-owned diagnostics sink.
  TfLiteContext context_;           // Context handed to kernels.
  MicroAllocator allocator_;        // Arena-backed tensor allocator.
  bool tensors_allocated_;          // Set once AllocateTensors() has run.

  TfLiteStatus initialization_status_;
  const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors_;
  const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators_;

  const SubGraph* subgraph_;        // The model subgraph being executed.
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICRO_MICRO_INTERPRETER_H_
Generated on Wed Jul 13 2022 16:03:35 by Doxygen 1.7.2