micro_optional_debug_tools.cc
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/micro/micro_optional_debug_tools.h"

#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {

std::vector<int> flatbuffersVector2StdVector(
    const flatbuffers::Vector<int32_t>* fVector) {
  std::vector<int> stdVector;
  for (size_t i = 0; i < fVector->size(); i++) {
    stdVector.push_back(fVector->Get(i));
  }
  return stdVector;
}

void PrintIntVector(const std::vector<int>& v) {
  for (const auto& it : v) {
    printf(" %d", it);
  }
  printf("\n");
}

void PrintTfLiteIntVector(const TfLiteIntArray* v) {
  if (!v) {
    printf(" (null)\n");
    return;
  }
  for (int k = 0; k < v->size; k++) {
    printf(" %d", v->data[k]);
  }
  printf("\n");
}

const char* TensorTypeName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "kTfLiteNoType";
    case kTfLiteFloat32:
      return "kTfLiteFloat32";
    case kTfLiteInt32:
      return "kTfLiteInt32";
    case kTfLiteUInt8:
      return "kTfLiteUInt8";
    case kTfLiteInt8:
      return "kTfLiteInt8";
    case kTfLiteInt64:
      return "kTfLiteInt64";
    case kTfLiteString:
      return "kTfLiteString";
    case kTfLiteBool:
      return "kTfLiteBool";
    case kTfLiteInt16:
      return "kTfLiteInt16";
    case kTfLiteComplex64:
      return "kTfLiteComplex64";
    case kTfLiteFloat16:
      return "kTfLiteFloat16";
  }
  return "(invalid)";
}

const char* AllocTypeName(TfLiteAllocationType type) {
  switch (type) {
    case kTfLiteMemNone:
      return "kTfLiteMemNone";
    case kTfLiteMmapRo:
      return "kTfLiteMmapRo";
    case kTfLiteDynamic:
      return "kTfLiteDynamic";
    case kTfLiteArenaRw:
      return "kTfLiteArenaRw";
    case kTfLiteArenaRwPersistent:
      return "kTfLiteArenaRwPersistent";
  }
  return "(invalid)";
}

// Prints a dump of what tensors and what nodes are in the interpreter.
void PrintInterpreterState(MicroInterpreter* interpreter) {
  printf("Interpreter has %zu tensors and %zu nodes\n",
         interpreter->tensors_size(), interpreter->operators_size());
  printf("Inputs:");
  PrintIntVector(flatbuffersVector2StdVector(interpreter->inputs()));
  printf("Outputs:");
  PrintIntVector(flatbuffersVector2StdVector(interpreter->outputs()));
  printf("\n");

  for (size_t tensor_index = 0; tensor_index < interpreter->tensors_size();
       tensor_index++) {
    TfLiteTensor* tensor = interpreter->tensor(static_cast<int>(tensor_index));
    printf("Tensor %3zu %-20s %10s %15s %10zu bytes (%4.1f MB) ", tensor_index,
           tensor->name, TensorTypeName(tensor->type),
           AllocTypeName(tensor->allocation_type), tensor->bytes,
           // Cast before dividing so the MB value keeps its fractional part.
           static_cast<double>(tensor->bytes) / (1 << 20));
    PrintTfLiteIntVector(tensor->dims);
  }
  printf("\n");

  for (size_t node_index = 0; node_index < interpreter->operators_size();
       node_index++) {
    struct pairTfLiteNodeAndRegistration node_and_reg =
        interpreter->node_and_registration(static_cast<int>(node_index));
    const TfLiteNode& node = node_and_reg.node;
    const TfLiteRegistration* reg = node_and_reg.registration;
    if (reg->custom_name != nullptr) {
      printf("Node %3zu Operator Custom Name %s\n", node_index,
             reg->custom_name);
    } else {
      printf("Node %3zu Operator Builtin Code %3d %s\n", node_index,
             reg->builtin_code, EnumNamesBuiltinOperator()[reg->builtin_code]);
    }
    printf(" Inputs:");
    PrintTfLiteIntVector(node.inputs);
    printf(" Outputs:");
    PrintTfLiteIntVector(node.outputs);
  }
}

}  // namespace tflite
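For reference, a minimal usage sketch follows. It is not part of the file above: it assumes a MicroInterpreter has already been constructed elsewhere (op resolver, tensor arena, and error reporter setup are omitted because they vary between library versions), and the helper name DumpInterpreterState plus the micro_interpreter.h include path are illustrative assumptions; only PrintInterpreterState itself is defined in this file.

// Hypothetical usage sketch, not part of micro_optional_debug_tools.cc.
#include "tensorflow/lite/experimental/micro/micro_interpreter.h"  // assumed include path
#include "tensorflow/lite/experimental/micro/micro_optional_debug_tools.h"

// Dumps the state of an already-built interpreter: every tensor (index, name,
// type, allocation type, size, dims) followed by every node (custom operator
// name or builtin code, inputs, outputs).
void DumpInterpreterState(tflite::MicroInterpreter* interpreter) {
  tflite::PrintInterpreterState(interpreter);
}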
