Daniel Konegen / MNIST_example

Dependencies:   mbed-os

svdf.cc

/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include <math.h>

#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/c_api_internal.h"
#include "tensorflow/lite/experimental/micro/kernels/activation_utils.h"
#include "tensorflow/lite/experimental/micro/micro_utils.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"

namespace tflite {
namespace ops {
namespace micro {
namespace svdf {
namespace {

/**
 * This version of SVDF is specific to TFLite Micro. It differs from the
 * TFLite version in two ways:
 *
 * 1.) Scratch tensor allocation - scratch tensors must be known ahead of time
 * for the Micro interpreter.
 * 2.) Output dimensions - the TFLite version determines the output size at
 * runtime and resizes the output tensor. The Micro runtime does not support
 * tensor resizing.
 */
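
// Overview of the computation (derived from the code below): SVDF keeps a
// sliding window of the last memory_size feature activations per filter in
// activation_state. Each cycle it
//   1. computes conv1d(input, weights_feature) and stores the result in the
//      state's rightmost column,
//   2. applies the time weights to each filter's state row, sums groups of
//      `rank` filters into num_units outputs, adds the bias, and applies the
//      fused activation, and
//   3. shifts the state left by one to make room for the next cycle.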

// TODO(kreeger): upstream these reference methods into
// `lite/kernels/reference/svdf.h`

static inline void ApplyTimeWeightsBiasAndActivation(
    int batch_size, int memory_size, int num_filters, int num_units, int rank,
    const TfLiteTensor* weights_time, const TfLiteTensor* bias,
    TfLiteFusedActivation activation, TfLiteTensor* activation_state,
    TfLiteTensor* scratch, TfLiteTensor* output) {
  // Compute matmul(activation_state, weights_time): for each batch, take the
  // dot product of each filter's time weights (memory_size values) with that
  // filter's row of activation_state, and store the num_filters results in
  // scratch.
  for (int b = 0; b < batch_size; ++b) {
    // Perform batched vector dot product:
    float* scratch_ptr_batch = GetTensorData<float>(scratch) + b * num_filters;
    const float* vector1_ptr = GetTensorData<float>(weights_time);
    const float* vector2_ptr =
        GetTensorData<float>(activation_state) + b * memory_size * num_filters;
    for (int i = 0; i < num_filters; ++i) {
      *scratch_ptr_batch = 0.f;
      for (int j = 0; j < memory_size; ++j) {
        *scratch_ptr_batch += *vector1_ptr++ * *vector2_ptr++;
      }
      scratch_ptr_batch++;
    }
  }
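  // At this point scratch holds batch_size * num_filters per-filter dot
  // products; the rest of the function reduces them into num_units outputs,
  // adds the bias, and applies the activation.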

  // Initialize output with bias if provided.
  if (bias) {
    // TODO(kreeger): doc me - VectorBatchVectorAssign
    const float* bias_data = GetTensorData<float>(bias);
    float* output_data = GetTensorData<float>(output);
    for (int i = 0; i < batch_size; ++i) {
      float* output_ptr = output_data + i * num_units;
      const float* bias_ptr = bias_data;
      for (int j = 0; j < num_units; ++j) {
        *output_ptr++ = *bias_ptr++;
      }
    }
  } else {
    float* output_data = GetTensorData<float>(output);
    for (int i = 0; i < batch_size * num_units; ++i) {
      *output_data++ = 0.0f;
    }
  }

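  // Since num_filters == num_units * rank, each output unit accumulates the
  // `rank` consecutive filter results that belong to it. For example
  // (hypothetical values), with rank = 2: output[0] += scratch[0] + scratch[1],
  // output[1] += scratch[2] + scratch[3], and so on.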
  // Reduction sum.
  for (int b = 0; b < batch_size; ++b) {
    float* output_ptr_batch = GetTensorData<float>(output) + b * num_units;
    float* scratch_ptr_batch = GetTensorData<float>(scratch) + b * num_filters;

    // Reduction sum vector
    const float* input_vector_ptr = scratch_ptr_batch;
    for (int i = 0; i < num_units; ++i) {
      for (int j = 0; j < rank; j++) {
        output_ptr_batch[i] += *input_vector_ptr++;
      }
    }
  }

  // Apply activation.
  for (int b = 0; b < batch_size; ++b) {
    float* output_ptr_batch = GetTensorData<float>(output) + b * num_units;
    for (int i = 0; i < num_units; ++i) {
      *output_ptr_batch = ActivationValFloat(activation, *output_ptr_batch);
      ++output_ptr_batch;
    }
  }

  // Left shift the activation_state to make room for next cycle's activation.
  // TODO(alanchiao): explore collapsing this into a single loop.
  for (int b = 0; b < batch_size; ++b) {
    float* state_ptr_batch =
        GetTensorData<float>(activation_state) + b * memory_size * num_filters;
    for (int f = 0; f < num_filters; ++f) {
      // Shift the vector left:
      float* batch_ptr = state_ptr_batch;
      float* batch_start = state_ptr_batch + 1;
      float* batch_end = state_ptr_batch + memory_size;
      while (batch_start != batch_end) {
        *batch_ptr++ = *batch_start++;
      }
      state_ptr_batch[memory_size - 1] = 0.0f;
      state_ptr_batch += memory_size;
    }
  }
}

inline void EvalFloatSVDF(TfLiteContext* context, TfLiteNode* node,
                          const TfLiteTensor* input,
                          const TfLiteTensor* weights_feature,
                          const TfLiteTensor* weights_time,
                          const TfLiteTensor* bias,
                          const TfLiteSVDFParams* params, TfLiteTensor* scratch,
                          TfLiteTensor* activation_state,
                          TfLiteTensor* output) {
  const int rank = params->rank;
  const int batch_size = input->dims->data[0];
  const int input_size = input->dims->data[1];
  const int num_filters = weights_feature->dims->data[0];
  const int num_units = num_filters / rank;
  const int memory_size = weights_time->dims->data[1];
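  // Shape example (hypothetical values, for illustration only): with
  // batch_size = 1, input_size = 32, rank = 1, num_filters = 16 and
  // memory_size = 10, the tensors are: input {1, 32}, weights_feature
  // {16, 32}, weights_time {16, 10}, activation_state {1, 160}, output {1, 16}.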

  // Clear the activation_state's rightmost column, where this cycle's
  // activation will be written (note the [memory_size - 1] index below).
  // TODO(ghodrat): Add a test which initializes activation_state with invalid
  // values in that column and make sure it passes.
  for (int b = 0; b < batch_size; ++b) {
    float* state_ptr_batch =
        GetTensorData<float>(activation_state) + b * memory_size * num_filters;
    for (int c = 0; c < num_filters; ++c) {
      float* state_ptr = state_ptr_batch + c * memory_size;
      state_ptr[memory_size - 1] = 0.0f;
    }
  }

  // Compute conv1d(inputs, weights_feature).
  // The activation_state's rightmost column is used to save current cycle
  // activation. This is achieved by starting at
  // GetTensorData<float>(activation_state)[memory_size - 1] and having the
  // stride equal to memory_size.

  // Perform batched matrix vector multiply accumulate operation:
  const float* matrix = GetTensorData<float>(weights_feature);
  const float* vector = GetTensorData<float>(input);
  float* result = &GetTensorData<float>(activation_state)[memory_size - 1];
  float* result_in_batch = result;
  for (int i = 0; i < batch_size; ++i) {
    const float* matrix_ptr = matrix;
    for (int j = 0; j < num_filters; ++j) {
      float dot_prod = 0.0f;
      const float* vector_in_batch = vector + i * input_size;
      for (int k = 0; k < input_size; ++k) {
        dot_prod += *matrix_ptr++ * *vector_in_batch++;
      }
      *result_in_batch += dot_prod;
      result_in_batch += memory_size;
    }
  }
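  // The rightmost column of activation_state now holds this cycle's
  // conv1d(inputs, weights_feature) result for every (batch, filter) pair.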

  ApplyTimeWeightsBiasAndActivation(
      batch_size, memory_size, num_filters, num_units, rank, weights_time, bias,
      params->activation, activation_state, scratch, output);
}

inline void EvalHybridSVDF(
    TfLiteContext* context, TfLiteNode* node, const TfLiteTensor* input,
    const TfLiteTensor* weights_feature, const TfLiteTensor* weights_time,
    const TfLiteTensor* bias, const TfLiteSVDFParams* params,
    TfLiteTensor* scratch, TfLiteTensor* scaling_factors,
    TfLiteTensor* input_quantized, TfLiteTensor* activation_state,
    TfLiteTensor* output) {
  const int rank = params->rank;
  const int batch_size = input->dims->data[0];
  const int input_size = input->dims->data[1];
  const int num_filters = weights_feature->dims->data[0];
  const int num_units = num_filters / rank;
  const int memory_size = weights_time->dims->data[1];

  // Initialize the pointer to input.
  const float* input_ptr_batch = GetTensorData<float>(input);

  int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>(input_quantized);
  const int8_t* weights_feature_ptr = GetTensorData<int8_t>(weights_feature);

  // Initialize the pointer to storage for scaling factors.
  float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);

  // Initialize the weights scale.
  const float weights_feature_scale = weights_feature->params.scale;

  // Clear the activation_state's rightmost column, where this cycle's
  // activation will be written (note the [memory_size - 1] index below).
  // TODO(ghodrat): Add a test which initializes activation_state with invalid
  // values in that column and make sure it passes.
  // TODO(kreeger): Use a port of tensor_utils when ready (b/140272187).
  for (int b = 0; b < batch_size; ++b) {
    float* state_ptr_batch =
        GetTensorData<float>(activation_state) + b * memory_size * num_filters;
    for (int c = 0; c < num_filters; ++c) {
      float* state_ptr = state_ptr_batch + c * memory_size;
      state_ptr[memory_size - 1] = 0.0f;
    }
  }

  // Check whether the entire input batch is all zeros:
  bool is_zero_vector = true;
  for (int i = 0; i < batch_size * input_size && is_zero_vector; ++i) {
    if (input_ptr_batch[i] != 0.0f) {
      is_zero_vector = false;
    }
  }

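  // If every input is zero, conv1d(inputs, weights_feature) contributes
  // nothing this cycle, so quantization and the matmul below can be skipped
  // entirely.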
  if (!is_zero_vector) {
    // Quantize the input from float to int8, recording one scaling factor per
    // batch (quantized along dimension 0).
    SignedSymmetricPerChannelQuantize(input_ptr_batch, input->dims, 0,
                                      quantized_input_ptr_batch,
                                      scaling_factors_ptr);

    // Fold the feature-weights scale into each batch's scaling factor, so a
    // single multiply dequantizes the int32 accumulator below.
    for (int b = 0; b < batch_size; ++b) {
      scaling_factors_ptr[b] *= weights_feature_scale;
    }
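
    // With symmetric quantization, real ~= scale * quantized. Hence
    // input_real * weight_real ~= (input_scale * weight_scale) * (q_in * q_w),
    // and scaling_factors_ptr[b] now holds input_scale[b] *
    // weights_feature_scale.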

    // Compute conv1d(inputs, weights_feature).
    // The rightmost column of activation_state is used to save the current
    // cycle activation. This is achieved by starting at
    // GetTensorData<float>(activation_state)[memory_size - 1] and having the
    // stride equal to memory_size. (Matrix batch vector multiply accumulate)
    float* result = &GetTensorData<float>(activation_state)[memory_size - 1];
    for (int i = 0; i < batch_size;
         ++i, quantized_input_ptr_batch += input_size) {
      const float batch_scaling_factor = scaling_factors_ptr[i];

      // Get the address of the first row:
      const int8_t* row_ptr = weights_feature_ptr;
      for (int j = 0; j < num_filters; ++j, result += memory_size) {
        // Initialize the dot product sum for the row to 0.
        int32_t dotprod = 0;
        for (int k = 0; k < input_size; ++k, ++row_ptr) {
          dotprod += (*row_ptr) * (quantized_input_ptr_batch[k]);
        }
        *result += dotprod * batch_scaling_factor;
      }
    }
  }

  // TODO(alanchiao): can optimize hybrid case ~5% by unrolling loop in applying
  // time weights so that the inner loop multiplies eight elements at a time.
  ApplyTimeWeightsBiasAndActivation(
      batch_size, memory_size, num_filters, num_units, rank, weights_time, bias,
      params->activation, activation_state, scratch, output);
}

}  // namespace

// Input tensors.
constexpr int kInputTensor = 0;
constexpr int kWeightsFeatureTensor = 1;
constexpr int kWeightsTimeTensor = 2;
constexpr int kBiasTensor = 3;
// This is a variable tensor, and will be modified by this op.
constexpr int kInputActivationStateTensor = 4;

// Output tensor.
constexpr int kOutputTensor = 0;
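
// Note: this port also expects a sixth input tensor (index 5) that serves as
// the shared scratch buffer; see the workarounds in Prepare() and Eval()
// below.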

void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  return nullptr;
}

void Free(TfLiteContext* context, void* buffer) {}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);

  // Validate Tensor Inputs (dtype depends on quantization):
  // [0] = Input, {2, batch_size, input_size}
  // [1] = Weights Feature, {2, num_filters, input_size}
  // [2] = Weights Time, {2, num_filters, memory_size}
  // [3] = Bias (optional), {1, num_units}
  // [4] = Activation State (variable),
  //         {2, batch_size, memory_size * num_filters}
  // TODO(kreeger): Use input tensor as variable until scratch tensor allocation
  // has been implemented (cl/263032056)
  // TF_LITE_ENSURE_EQ(context, node->inputs->size, 5);
  TF_LITE_ENSURE_EQ(context, node->inputs->size, 6);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* weights_feature =
      GetInput(context, node, kWeightsFeatureTensor);
  const TfLiteTensor* weights_time =
      GetInput(context, node, kWeightsTimeTensor);
  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
  TfLiteTensor* activation_state =
      &context->tensors[node->inputs->data[kInputActivationStateTensor]];

  // Define input constants based on input tensor definition above:
  const int rank = params->rank;
  const int input_size = input->dims->data[1];
  const int batch_size = input->dims->data[0];
  const int num_filters = weights_feature->dims->data[0];
  TF_LITE_ENSURE_EQ(context, num_filters % rank, 0);
  const int num_units = num_filters / rank;
  const int memory_size = weights_time->dims->data[1];

  // Validate Input Tensor:
  TF_LITE_ENSURE_EQ(context, input->type, kTfLiteFloat32);
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2);

  // Validate Weights Feature Input Tensor:
  TF_LITE_ENSURE_EQ(context, NumDimensions(weights_feature), 2);
  TF_LITE_ENSURE_EQ(context, weights_feature->dims->data[1], input_size);

  // Validate Weights Time Input Tensor:
  TF_LITE_ENSURE_EQ(context, NumDimensions(weights_time), 2);
  TF_LITE_ENSURE_EQ(context, weights_time->dims->data[0], num_filters);
  TF_LITE_ENSURE_EQ(context, weights_time->dims->data[1], memory_size);

  // Validate Optional Bias Input Tensor:
  if (bias) {
    TF_LITE_ENSURE_EQ(context, bias->dims->data[0], num_units);
    TF_LITE_ENSURE_EQ(context, bias->type, kTfLiteFloat32);
  }

  // Validate Activation State Input Tensor:
  TF_LITE_ENSURE_EQ(context, activation_state->type, kTfLiteFloat32);
  TF_LITE_ENSURE_EQ(context, NumDimensions(activation_state), 2);
  TF_LITE_ENSURE_EQ(context, activation_state->dims->data[0], batch_size);
  TF_LITE_ENSURE_EQ(context, activation_state->dims->data[1],
                    memory_size * num_filters);

  // Validate shared Scratch Tensor (same for full float and hybrid):
  // [0] = Holds dot-product of time-forward calculations in
  //       ApplyTimeWeightsBiasAndActivation():
  //         float, {2, batch_size, num_filters}
  // TODO(kreeger): Use input tensor as variable until scratch tensor allocation
  // has been implemented (cl/263032056)
  // TfLiteTensor* scratch_tensor = GetTemporary(context, node, 0);
  TfLiteTensor* scratch_tensor = &context->tensors[node->inputs->data[5]];

  TF_LITE_ENSURE_EQ(context, scratch_tensor->type, kTfLiteFloat32);
  TF_LITE_ENSURE_EQ(context, NumDimensions(scratch_tensor), 2);
  TF_LITE_ENSURE_EQ(context, scratch_tensor->dims->data[0], batch_size);
  TF_LITE_ENSURE_EQ(context, scratch_tensor->dims->data[1], num_filters);

  // The weights are of consistent type, so it suffices to check one.
  const bool is_hybrid_op = IsHybridOp(input, weights_feature);
  // TODO(kreeger): Handle full quant svdf b/139435798
  if (is_hybrid_op) {
    // Validate Input Tensor dtypes:
    TF_LITE_ENSURE(context, weights_feature->type == kTfLiteUInt8 ||
                                weights_feature->type == kTfLiteInt8);
    TF_LITE_ENSURE(context, weights_time->type == kTfLiteUInt8 ||
                                weights_time->type == kTfLiteInt8);

    // Validate Scratch Tensors:
    // [0] = (shared - see above for usage)
    // [1] = Input Quantized, int8_t/uint8_t, {2, batch_size, input_size}
    // [2] = Scaling Factors, float, {1, batch_size}
    // [3] = Float Weights Time, float, {2, num_filters, memory_size}
    TF_LITE_ENSURE_EQ(context, node->temporaries->size, 4);
    TfLiteTensor* scratch_input_quantized = GetTemporary(context, node, 1);
    TfLiteTensor* scratch_scaling_factors = GetTemporary(context, node, 2);
    TfLiteTensor* scratch_float_weights_time = GetTemporary(context, node, 3);

    // Validate Input Quantized Scratch Tensor:
    TF_LITE_ENSURE(context, scratch_input_quantized->type == kTfLiteUInt8 ||
                                scratch_input_quantized->type == kTfLiteInt8);
    TF_LITE_ENSURE_EQ(context, scratch_input_quantized->dims->data[0],
                      batch_size);
    TF_LITE_ENSURE_EQ(context, scratch_input_quantized->dims->data[1],
                      input_size);

    // Validate Scaling Factors Scratch Tensor:
    TF_LITE_ENSURE_EQ(context, scratch_scaling_factors->type, kTfLiteFloat32);
    TF_LITE_ENSURE_EQ(context, NumDimensions(scratch_scaling_factors), 1);
    TF_LITE_ENSURE_EQ(context, scratch_scaling_factors->dims->data[0],
                      batch_size);

    // Validate Float Weights Time Scratch Tensor:
    TF_LITE_ENSURE_EQ(context, scratch_float_weights_time->type,
                      kTfLiteFloat32);
    TF_LITE_ENSURE_EQ(context, NumDimensions(scratch_float_weights_time), 2);
    TF_LITE_ENSURE_EQ(context, scratch_float_weights_time->dims->data[0],
                      num_filters);
    TF_LITE_ENSURE_EQ(context, scratch_float_weights_time->dims->data[1],
                      memory_size);

    // TfLite Micro has scratch tensors allocated at the time that Prepare() is
    // called. Use this time to do a one-time de-quantization copy of
    // the input values from the Weights Time tensor to the float weights time
    // scratch tensor.
    // TODO(kreeger): Consider doing this at model conversion time?
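    // SymmetricDequantize computes float = scale * quantized for each element
    // (symmetric quantization uses a zero point of 0).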
    SymmetricDequantize(GetTensorData<int8_t>(weights_time),
                        NumElements(scratch_float_weights_time),
                        weights_time->params.scale,
                        GetTensorData<float>(scratch_float_weights_time));
  } else {
    // Validate Input Tensor dtypes:
    TF_LITE_ENSURE_EQ(context, weights_feature->type, kTfLiteFloat32);
    TF_LITE_ENSURE_EQ(context, weights_time->type, kTfLiteFloat32);

    // Full-float SVDF only uses the one shared scratch tensor (see above for
    // usage).
    // TODO(kreeger): Use input tensor as variable until scratch tensor
    // allocation has been implemented (cl/263032056)
    // TF_LITE_ENSURE_EQ(context, node->temporaries->size, 1);
  }

  // Validate Tensor Output:
  // [0] = float, {2, batch_size, num_units}
  TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE_EQ(context, output->type, kTfLiteFloat32);
  TF_LITE_ENSURE_EQ(context, NumDimensions(output), 2);
  TF_LITE_ENSURE_EQ(context, output->dims->data[0], batch_size);
  TF_LITE_ENSURE_EQ(context, output->dims->data[1], num_units);

  return kTfLiteOk;
}

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const auto* params = reinterpret_cast<TfLiteSVDFParams*>(node->builtin_data);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* weights_feature =
      GetInput(context, node, kWeightsFeatureTensor);
  const TfLiteTensor* weights_time =
      GetInput(context, node, kWeightsTimeTensor);
  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);

  // TODO(kreeger): Use input tensor as variable until scratch tensor allocation
  // has been implemented (cl/263032056)
  // TfLiteTensor* scratch = GetTemporary(context, node, /*index=*/0);
  TfLiteTensor* scratch = &context->tensors[node->inputs->data[5]];

  TfLiteTensor* activation_state =
      &context->tensors[node->inputs->data[kInputActivationStateTensor]];
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  switch (weights_feature->type) {
    case kTfLiteFloat32: {
      EvalFloatSVDF(context, node, input, weights_feature, weights_time, bias,
                    params, scratch, activation_state, output);
      return kTfLiteOk;
    }

    case kTfLiteUInt8:
    case kTfLiteInt8: {
      // The hybrid path evaluates against the float time weights dequantized
      // into scratch tensor [3] during Prepare().
      TfLiteTensor* scratch_input_quantized = GetTemporary(context, node, 1);
      TfLiteTensor* scratch_scaling_factors = GetTemporary(context, node, 2);
      TfLiteTensor* scratch_float_weights_time = GetTemporary(context, node, 3);
      EvalHybridSVDF(context, node, input, weights_feature,
                     scratch_float_weights_time, bias, params, scratch,
                     scratch_scaling_factors, scratch_input_quantized,
                     activation_state, output);
      return kTfLiteOk;
    }

    default:
      // TODO(kreeger): Handle this case for full quant svdf b/139435798
      context->ReportError(context, "Type %s not currently supported.",
                           TfLiteTypeGetName(weights_feature->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}

}  // namespace svdf

TfLiteRegistration* Register_SVDF() {
  static TfLiteRegistration r = {svdf::Init, svdf::Free, svdf::Prepare,
                                 svdf::Eval};
  return &r;
}
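
// Usage sketch (assumption: the op-resolver API from the same TFLM generation
// as this file; verify against your tree). An application would typically
// expose this kernel to the interpreter via something like:
//
//   static tflite::MicroMutableOpResolver resolver;
//   resolver.AddBuiltin(tflite::BuiltinOperator_SVDF,
//                       tflite::ops::micro::Register_SVDF());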

}  // namespace micro
}  // namespace ops
}  // namespace tflite