Daniel Konegen / MNIST_example

Dependencies: mbed-os


flatbuffer_conversions.cc

/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/lite/core/api/flatbuffer_conversions.h"

#include <cstdlib>
#include <memory>  // for std::unique_ptr used below

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/schema/schema_generated.h"

namespace tflite {

namespace {
// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid).
class SafeBuiltinDataAllocator {
 public:
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}

    void operator()(void* data) { allocator_->Deallocate(data); }

   private:
    BuiltinDataAllocator* allocator_;
  };

  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;

  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}

  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }

 private:
  BuiltinDataAllocator* allocator_;
};
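
// A minimal usage sketch of the class above (illustrative only, not part of
// the upstream file); `my_allocator` stands in for any concrete
// BuiltinDataAllocator. If parsing bails out before release(), the
// unique_ptr's deleter hands the memory back to the allocator, so nothing
// leaks:
//
//   SafeBuiltinDataAllocator safe_allocator(&my_allocator);
//   auto params = safe_allocator.Allocate<TfLiteConvParams>();
//   params->stride_width = 1;  // ...populate fields from the model...
//   void* builtin_data = reinterpret_cast<void*>(params.release());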

// Copies the contents of the flatbuffer int vector `flat_vector` into the
// int array `buffer`. Both represent the same configuration for the given
// operation.
TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
  if (!flat_vector) {
    error_reporter->Report("Input array not provided for operation '%s'.\n",
                           op_name);
    return kTfLiteError;
  } else {
    int num_dimensions = flat_vector->size();
    if (num_dimensions > max_size_of_buffer / sizeof(int)) {
      error_reporter->Report(
          "Found too many dimensions in the input array of operation '%s'.\n",
          op_name);
      return kTfLiteError;
    } else {
      for (int i = 0; i < num_dimensions; ++i) {
        buffer[i] = flat_vector->Get(i);
      }
    }
  }
  return kTfLiteOk;
}
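
// Illustrative call of the helper above (a sketch mirroring the RESHAPE case
// later in this file): `sizeof(params->shape)` bounds the copy, so an
// oversized vector in a malformed model is rejected instead of overflowing
// the fixed-size array.
//
//   auto params = safe_allocator.Allocate<TfLiteReshapeParams>();
//   TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
//       sizeof(params->shape), new_shape, params->shape, error_reporter,
//       "reshape"));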

}  // namespace

TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  *type = kTfLiteNoType;
  switch (tensor_type) {
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      break;
    case TensorType_FLOAT16:
      *type = kTfLiteFloat16;
      break;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      break;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      break;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      break;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      break;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      break;
    case TensorType_STRING:
      *type = kTfLiteString;
      break;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      break;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      break;
  }
  if (*type == kTfLiteNoType) {
    error_reporter->Report("Unsupported data type %d in tensor\n", tensor_type);
    return kTfLiteError;
  }
  return kTfLiteOk;
}
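
// Sketch of the conversion contract above (illustrative): supported schema
// enums map onto the runtime TfLiteType; anything else reports an error,
// leaves *type as kTfLiteNoType, and returns kTfLiteError.
//
//   TfLiteType type;
//   if (ConvertTensorType(TensorType_FLOAT32, &type, error_reporter) ==
//       kTfLiteOk) {
//     // type == kTfLiteFloat32 here.
//   }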

// Parses the appropriate parameter data out of the op.
//
// This handles builtin data explicitly, as there are flatbuffer schemas for
// it. If this returns kTfLiteOk, it passes the data out through
// `builtin_data`, which needs to be released by calling `free`.
// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  auto parse_padding = [](Padding padding) {
    switch (padding) {
      case Padding_SAME:
        return kTfLitePaddingSame;
      case Padding_VALID:
        return kTfLitePaddingValid;
    }
    return kTfLitePaddingUnknown;
  };
  auto parse_activation = [](ActivationFunctionType activation) {
    switch (activation) {
      case ActivationFunctionType_NONE:
        return kTfLiteActNone;
      case ActivationFunctionType_RELU:
        return kTfLiteActRelu;
      case ActivationFunctionType_RELU_N1_TO_1:
        return kTfLiteActRelu1;
      case ActivationFunctionType_RELU6:
        return kTfLiteActRelu6;
      case ActivationFunctionType_TANH:
        return kTfLiteActTanh;
      case ActivationFunctionType_SIGN_BIT:
        return kTfLiteActSignBit;
    }
    return kTfLiteActNone;
  };
  auto parseLSHProjectionType = [](LSHProjectionType type) {
    switch (type) {
      case LSHProjectionType_SPARSE:
        return kTfLiteLshProjectionSparse;
      case LSHProjectionType_DENSE:
        return kTfLiteLshProjectionDense;
      default:
        return kTfLiteLshProjectionUnknown;
    }
  };
  auto parseCombinerType = [](CombinerType type) {
    switch (type) {
      case CombinerType_MEAN:
        return kTfLiteCombinerTypeMean;
      case CombinerType_SQRTN:
        return kTfLiteCombinerTypeSqrtn;
      case CombinerType_SUM:
      default:
        return kTfLiteCombinerTypeSum;
    }
  };

  SafeBuiltinDataAllocator safe_allocator(allocator);
  *builtin_data = nullptr;
  switch (op_type) {
    case BuiltinOperator_CONV_2D: {
      auto params = safe_allocator.Allocate<TfLiteConvParams>();
      if (auto* conv_params = op->builtin_options_as_Conv2DOptions()) {
        params->padding = parse_padding(conv_params->padding());
        params->stride_width = conv_params->stride_w();
        params->stride_height = conv_params->stride_h();
        params->activation =
            parse_activation(conv_params->fused_activation_function());

        params->dilation_width_factor = conv_params->dilation_w_factor();
        params->dilation_height_factor = conv_params->dilation_h_factor();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
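    // Note: almost every case below repeats the CONV_2D pattern above:
    // allocate the params struct through safe_allocator, copy fields from
    // the op's builtin options when present, then release ownership into
    // *builtin_data. Only the error paths return before the release.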
    case BuiltinOperator_CAST: {
      auto params = safe_allocator.Allocate<TfLiteCastParams>();
      if (const auto* schema_params = op->builtin_options_as_CastOptions()) {
        auto in_status =
            ConvertTensorType(schema_params->in_data_type(),
                              &params->in_data_type, error_reporter);
        auto out_status =
            ConvertTensorType(schema_params->out_data_type(),
                              &params->out_data_type, error_reporter);
        if (in_status != kTfLiteOk || out_status != kTfLiteOk) {
          return kTfLiteError;
        }
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_LSH_PROJECTION: {
      auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
      if (const auto* lshParams =
              op->builtin_options_as_LSHProjectionOptions()) {
        params->type = parseLSHProjectionType(lshParams->type());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_AVERAGE_POOL_2D:
    case BuiltinOperator_MAX_POOL_2D:
    case BuiltinOperator_L2_POOL_2D: {
      auto params = safe_allocator.Allocate<TfLitePoolParams>();
      if (const auto* pool_params = op->builtin_options_as_Pool2DOptions()) {
        params->padding = parse_padding(pool_params->padding());
        params->stride_width = pool_params->stride_w();
        params->stride_height = pool_params->stride_h();
        params->filter_width = pool_params->filter_width();
        params->filter_height = pool_params->filter_height();
        params->activation =
            parse_activation(pool_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_DEPTHWISE_CONV_2D: {
      auto params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
      if (const auto* conv_params =
              op->builtin_options_as_DepthwiseConv2DOptions()) {
        params->padding = parse_padding(conv_params->padding());
        params->stride_width = conv_params->stride_w();
        params->stride_height = conv_params->stride_h();
        params->depth_multiplier = conv_params->depth_multiplier();
        params->activation =
            parse_activation(conv_params->fused_activation_function());

        params->dilation_width_factor = conv_params->dilation_w_factor();
        params->dilation_height_factor = conv_params->dilation_h_factor();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SVDF: {
      auto params = safe_allocator.Allocate<TfLiteSVDFParams>();
      if (const auto* svdf_params = op->builtin_options_as_SVDFOptions()) {
        params->rank = svdf_params->rank();
        params->activation =
            parse_activation(svdf_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
      auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
      if (const auto* sequence_rnn_params =
              op->builtin_options_as_SequenceRNNOptions()) {
        params->activation =
            parse_activation(sequence_rnn_params->fused_activation_function());
        params->time_major = sequence_rnn_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
      if (const auto* bidi_sequence_rnn_params =
              op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
        params->activation = parse_activation(
            bidi_sequence_rnn_params->fused_activation_function());
        params->time_major = bidi_sequence_rnn_params->time_major();
        params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_RNN: {
      auto params = safe_allocator.Allocate<TfLiteRNNParams>();
      if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
        params->activation =
            parse_activation(rnn_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
      auto params =
          safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
      if (const auto* embedding_params =
              op->builtin_options_as_EmbeddingLookupSparseOptions()) {
        params->combiner = parseCombinerType(embedding_params->combiner());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_FULLY_CONNECTED: {
      auto params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
      if (const auto* fully_connected_params =
              op->builtin_options_as_FullyConnectedOptions()) {
        params->activation = parse_activation(
            fully_connected_params->fused_activation_function());
        params->keep_num_dims = fully_connected_params->keep_num_dims();
        switch (fully_connected_params->weights_format()) {
          case FullyConnectedOptionsWeightsFormat_DEFAULT:
            params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
            break;
          case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
            params->weights_format =
                kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
            break;
          default:
            error_reporter->Report("Unhandled fully-connected weights format.");
            return kTfLiteError;
        }
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_HASHTABLE_LOOKUP:
      // no-op.
      break;
    case BuiltinOperator_SOFTMAX: {
      auto params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
      if (const auto* softmax_params =
              op->builtin_options_as_SoftmaxOptions()) {
        params->beta = softmax_params->beta();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_CONCATENATION: {
      auto params = safe_allocator.Allocate<TfLiteConcatenationParams>();
      if (const auto* concatenation_params =
              op->builtin_options_as_ConcatenationOptions()) {
        params->activation =
            parse_activation(concatenation_params->fused_activation_function());
        params->axis = concatenation_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_MUL: {
      auto params = safe_allocator.Allocate<TfLiteMulParams>();
      if (const auto* schema_params = op->builtin_options_as_MulOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_ADD: {
      auto params = safe_allocator.Allocate<TfLiteAddParams>();
      if (const auto* schema_params = op->builtin_options_as_AddOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_DIV: {
      auto params = safe_allocator.Allocate<TfLiteDivParams>();
      if (const auto* schema_params = op->builtin_options_as_DivOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SUB: {
      auto params = safe_allocator.Allocate<TfLiteSubParams>();
      if (const auto* schema_params = op->builtin_options_as_SubOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_L2_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteL2NormParams>();
      if (const auto* schema_params = op->builtin_options_as_L2NormOptions()) {
        params->activation =
            parse_activation(schema_params->fused_activation_function());
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
      auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
      if (const auto* schema_params =
              op->builtin_options_as_LocalResponseNormalizationOptions()) {
        params->radius = schema_params->radius();
        params->bias = schema_params->bias();
        params->alpha = schema_params->alpha();
        params->beta = schema_params->beta();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_LSTM: {
      auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
      if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
        params->activation =
            parse_activation(lstm_params->fused_activation_function());
        params->cell_clip = lstm_params->cell_clip();
        params->proj_clip = lstm_params->proj_clip();
        switch (lstm_params->kernel_type()) {
          case LSTMKernelType_FULL:
            params->kernel_type = kTfLiteLSTMFullKernel;
            break;
          case LSTMKernelType_BASIC:
            params->kernel_type = kTfLiteLSTMBasicKernel;
            break;
          default:
            error_reporter->Report("Unhandled LSTM kernel type: %d",
                                   lstm_params->kernel_type());
            return kTfLiteError;
        }
      } else {
        error_reporter->Report("No valid LSTM builtin options exist");
        return kTfLiteError;
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
      if (const auto* seq_lstm_params =
              op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
        params->activation =
            parse_activation(seq_lstm_params->fused_activation_function());
        params->cell_clip = seq_lstm_params->cell_clip();
        params->proj_clip = seq_lstm_params->proj_clip();
        params->time_major = seq_lstm_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
      auto params =
          safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
      if (const auto* bidi_lstm_params =
              op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
        params->activation =
            parse_activation(bidi_lstm_params->fused_activation_function());
        params->cell_clip = bidi_lstm_params->cell_clip();
        params->proj_clip = bidi_lstm_params->proj_clip();
        params->merge_outputs = bidi_lstm_params->merge_outputs();
        params->time_major = bidi_lstm_params->time_major();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_RESIZE_BILINEAR: {
      auto params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
      if (const auto* schema_params =
              op->builtin_options_as_ResizeBilinearOptions()) {
        params->align_corners = schema_params->align_corners();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
      // Large functions confuse macOS builds with Xcode 8, so a lambda is
      // required to minimize function size. TODO(b/118447267): Simplify
      // ParseOpData function and reduce its length.
      [&]() {
        auto params =
            safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
        if (const auto* schema_params =
                op->builtin_options_as_ResizeNearestNeighborOptions()) {
          params->align_corners = schema_params->align_corners();
        }
        *builtin_data = reinterpret_cast<void*>(params.release());
      }();
      break;
    }
    case BuiltinOperator_RESHAPE: {
      auto params = safe_allocator.Allocate<TfLiteReshapeParams>();
      if (const auto* schema_params = op->builtin_options_as_ReshapeOptions()) {
        auto* new_shape = schema_params->new_shape();
        TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
            sizeof(params->shape), new_shape, params->shape, error_reporter,
            "reshape"));
        params->num_dimensions = new_shape->size();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SKIP_GRAM: {
      auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
      if (const auto* skip_gram_params =
              op->builtin_options_as_SkipGramOptions()) {
        params->ngram_size = skip_gram_params->ngram_size();
        params->max_skip_size = skip_gram_params->max_skip_size();
        params->include_all_ngrams = skip_gram_params->include_all_ngrams();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SPACE_TO_DEPTH: {
      auto params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
      if (const auto* schema_params =
              op->builtin_options_as_SpaceToDepthOptions()) {
        params->block_size = schema_params->block_size();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_DEPTH_TO_SPACE: {
      auto params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
      if (const auto* schema_params =
              op->builtin_options_as_DepthToSpaceOptions()) {
        params->block_size = schema_params->block_size();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_GATHER: {
      auto params = safe_allocator.Allocate<TfLiteGatherParams>();
      params->axis = 0;
      if (const auto* gather_params = op->builtin_options_as_GatherOptions()) {
        params->axis = gather_params->axis();
      }

      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_MEAN:
    case BuiltinOperator_REDUCE_MAX:
    case BuiltinOperator_REDUCE_MIN:
    case BuiltinOperator_REDUCE_PROD:
    case BuiltinOperator_REDUCE_ANY:
    case BuiltinOperator_SUM: {
      auto params = safe_allocator.Allocate<TfLiteReducerParams>();
      if (const auto* schema_params = op->builtin_options_as_ReducerOptions()) {
        params->keep_dims = schema_params->keep_dims();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SPLIT: {
      auto params = safe_allocator.Allocate<TfLiteSplitParams>();
      if (const auto* schema_params = op->builtin_options_as_SplitOptions()) {
        params->num_splits = schema_params->num_splits();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SPLIT_V: {
      auto params = safe_allocator.Allocate<TfLiteSplitParams>();
      if (const auto* schema_params = op->builtin_options_as_SplitVOptions()) {
        params->num_splits = schema_params->num_splits();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SQUEEZE: {
      auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
      if (const auto* schema_params = op->builtin_options_as_SqueezeOptions()) {
        const auto& squeeze_dims = schema_params->squeeze_dims();
        TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
            sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
            error_reporter, "squeeze"));
        params->num_squeeze_dims = squeeze_dims->size();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_STRIDED_SLICE: {
      auto params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
      if (const auto* schema_params =
              op->builtin_options_as_StridedSliceOptions()) {
        params->begin_mask = schema_params->begin_mask();
        params->end_mask = schema_params->end_mask();
        params->ellipsis_mask = schema_params->ellipsis_mask();
        params->new_axis_mask = schema_params->new_axis_mask();
        params->shrink_axis_mask = schema_params->shrink_axis_mask();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_ARG_MAX: {
      auto params = safe_allocator.Allocate<TfLiteArgMaxParams>();
      if (const auto* schema_params = op->builtin_options_as_ArgMaxOptions()) {
        ConvertTensorType(schema_params->output_type(), &params->output_type,
                          error_reporter);
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_ARG_MIN: {
      auto params = safe_allocator.Allocate<TfLiteArgMinParams>();
      if (const auto* schema_params = op->builtin_options_as_ArgMinOptions()) {
        ConvertTensorType(schema_params->output_type(), &params->output_type,
                          error_reporter);
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_TRANSPOSE_CONV: {
      auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
      if (const auto* transpose_conv_params =
              op->builtin_options_as_TransposeConvOptions()) {
        params->padding = parse_padding(transpose_conv_params->padding());
        params->stride_width = transpose_conv_params->stride_w();
        params->stride_height = transpose_conv_params->stride_h();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SPARSE_TO_DENSE: {
      auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
      if (const auto* sparse_to_dense_params =
              op->builtin_options_as_SparseToDenseOptions()) {
        params->validate_indices = sparse_to_dense_params->validate_indices();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_SHAPE: {
      auto params = safe_allocator.Allocate<TfLiteShapeParams>();
      if (const auto* schema_params = op->builtin_options_as_ShapeOptions()) {
        ConvertTensorType(schema_params->out_type(), &params->out_type,
                          error_reporter);
      }
      *builtin_data = static_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_PACK: {
      auto params = safe_allocator.Allocate<TfLitePackParams>();
      if (const auto* pack_params = op->builtin_options_as_PackOptions()) {
        params->values_count = pack_params->values_count();
        params->axis = pack_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_DELEGATE: {
      // TODO(ycling): Revisit when supporting saving delegated models.
      error_reporter->Report("DELEGATE op shouldn't exist in model.");
      return kTfLiteError;
    }
    case BuiltinOperator_FAKE_QUANT: {
      auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
      if (const auto* schema_params =
              op->builtin_options_as_FakeQuantOptions()) {
        params->min = schema_params->min();
        params->max = schema_params->max();
        params->num_bits = schema_params->num_bits();
        params->narrow_range = schema_params->narrow_range();
      }
      *builtin_data = static_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_ONE_HOT: {
      auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
      if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
        params->axis = schema_params->axis();
      }
      *builtin_data = static_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_UNPACK: {
      auto params = safe_allocator.Allocate<TfLiteUnpackParams>();
      if (const auto* unpack_params = op->builtin_options_as_UnpackOptions()) {
        params->num = unpack_params->num();
        params->axis = unpack_params->axis();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_LEAKY_RELU: {
      auto params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
      if (const auto* leaky_relu_params =
              op->builtin_options_as_LeakyReluOptions()) {
        params->alpha = leaky_relu_params->alpha();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_MIRROR_PAD: {
      auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
      const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
      if (mirror_pad_params != nullptr) {
        params->mode =
            mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
                ? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
                : TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_UNIQUE: {
      auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
      const auto* unique_params = op->builtin_options_as_UniqueOptions();
      if (unique_params != nullptr) {
        params->index_out_type =
            unique_params->idx_out_type() == tflite::TensorType_INT64
                ? TfLiteType::kTfLiteInt64
                : TfLiteType::kTfLiteInt32;
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
    case BuiltinOperator_REVERSE_SEQUENCE: {
      auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
      if (const auto* reverse_seq_params =
              op->builtin_options_as_ReverseSequenceOptions()) {
        params->seq_dim = reverse_seq_params->seq_dim();
        params->batch_dim = reverse_seq_params->batch_dim();
      }
      *builtin_data = reinterpret_cast<void*>(params.release());
      break;
    }
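    // Note: unlike the cases above, IF and WHILE below allocate through the
    // raw allocator rather than SafeBuiltinDataAllocator; they have no
    // failing path between allocation and the *builtin_data assignment, so
    // no cleanup guard is needed.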
    case BuiltinOperator_IF: {
      TfLiteIfParams* params = allocator->AllocatePOD<TfLiteIfParams>();
      if (const auto* if_params = op->builtin_options_as_IfOptions()) {
        params->then_subgraph_index = if_params->then_subgraph_index();
        params->else_subgraph_index = if_params->else_subgraph_index();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    case BuiltinOperator_WHILE: {
      TfLiteWhileParams* params = allocator->AllocatePOD<TfLiteWhileParams>();
      if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
        params->cond_subgraph_index = while_params->cond_subgraph_index();
        params->body_subgraph_index = while_params->body_subgraph_index();
      }
      *builtin_data = reinterpret_cast<void*>(params);
      break;
    }
    // Below are the ops with no builtin_data structure.
    case BuiltinOperator_ABS:
    case BuiltinOperator_BATCH_TO_SPACE_ND:
    // TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
    // ok for now, since there is no call implementation either.
    case BuiltinOperator_CALL:
    case BuiltinOperator_CONCAT_EMBEDDINGS:
    case BuiltinOperator_COS:
    case BuiltinOperator_CUSTOM:
    case BuiltinOperator_DEQUANTIZE:
    case BuiltinOperator_ELU:
    case BuiltinOperator_EMBEDDING_LOOKUP:
    case BuiltinOperator_EQUAL:
    case BuiltinOperator_EXP:
    case BuiltinOperator_EXPAND_DIMS:
    case BuiltinOperator_CEIL:
    case BuiltinOperator_FLOOR:
    case BuiltinOperator_GREATER:
    case BuiltinOperator_GREATER_EQUAL:
    case BuiltinOperator_HARD_SWISH:
    case BuiltinOperator_LESS:
    case BuiltinOperator_LESS_EQUAL:
    case BuiltinOperator_LOG:
    case BuiltinOperator_LOGISTIC:
    case BuiltinOperator_LOG_SOFTMAX:
    case BuiltinOperator_MATRIX_DIAG:
    case BuiltinOperator_MATRIX_SET_DIAG:
    case BuiltinOperator_MAXIMUM:
    case BuiltinOperator_MINIMUM:
    case BuiltinOperator_NEG:
    case BuiltinOperator_NOT_EQUAL:
    case BuiltinOperator_PAD:
    case BuiltinOperator_PADV2:
    case BuiltinOperator_PRELU:
    case BuiltinOperator_RELU:
    case BuiltinOperator_RELU6:
    case BuiltinOperator_RELU_N1_TO_1:
    case BuiltinOperator_ROUND:
    case BuiltinOperator_RSQRT:
    case BuiltinOperator_SELECT:
    case BuiltinOperator_SIN:
    case BuiltinOperator_SLICE:
    case BuiltinOperator_SPACE_TO_BATCH_ND:
    case BuiltinOperator_SQRT:
    case BuiltinOperator_TANH:
    case BuiltinOperator_TILE:
    case BuiltinOperator_TOPK_V2:
    case BuiltinOperator_TRANSPOSE:
    case BuiltinOperator_POW:
    case BuiltinOperator_LOGICAL_OR:
    case BuiltinOperator_LOGICAL_AND:
    case BuiltinOperator_LOGICAL_NOT:
    case BuiltinOperator_FLOOR_DIV:
    case BuiltinOperator_SQUARE:
    case BuiltinOperator_ZEROS_LIKE:
    case BuiltinOperator_FILL:
    case BuiltinOperator_FLOOR_MOD:
    case BuiltinOperator_RANGE:
    case BuiltinOperator_SQUARED_DIFFERENCE:
    case BuiltinOperator_REVERSE_V2:
    case BuiltinOperator_ADD_N:
    case BuiltinOperator_GATHER_ND:
    case BuiltinOperator_WHERE:
    case BuiltinOperator_RANK:
    case BuiltinOperator_QUANTIZE:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
    case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
    case BuiltinOperator_SCATTER_ND:
      break;
  }
  return kTfLiteOk;
}  // NOLINT[readability/fn_size]
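
// Caller-side sketch of the contract documented above ParseOpData
// (illustrative; `op`, `op_type`, `reporter`, and `allocator` are assumed to
// come from the surrounding model loader):
//
//   void* builtin_data = nullptr;
//   if (ParseOpData(op, op_type, reporter, allocator, &builtin_data) !=
//       kTfLiteOk) {
//     return kTfLiteError;  // builtin_data is nullptr on failure.
//   }
//   // On success the caller owns builtin_data and must release it.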

}  // namespace tflite