code stringlengths 1 1.05M | repo_name stringlengths 6 83 | path stringlengths 3 242 | language stringclasses 222
values | license stringclasses 20
values | size int64 1 1.05M |
|---|---|---|---|---|---|
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/c/c_api_types.h"
#ifndef TF_LITE_STATIC_MEMORY
#include <stdlib.h>
#include <string.h>
#endif // TF_LITE_STATIC_MEMORY
int TfLiteIntArrayGetSizeInBytes(int size) {
static TfLiteIntArray dummy;
return sizeof(dummy) + sizeof(dummy.data[0]) * size;
}
// Returns 1 if `a` and `b` hold identical contents, 0 otherwise. Two NULL
// pointers compare equal; a NULL and a non-NULL array do not.
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b) {
  if (a == b) return 1;      // Same object, or both NULL.
  if (!a || !b) return 0;    // Exactly one side is NULL.
  return TfLiteIntArrayEqualsArray(a, b->size, b->data);
}
// Returns 1 if `a` holds exactly the `b_size` ints in `b_data`, 0 otherwise.
// A NULL `a` is treated as an empty array (equal only when b_size == 0).
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]) {
  if (a == NULL) return b_size == 0;
  if (a->size != b_size) return 0;
  for (int i = 0; i < b_size; ++i) {
    if (a->data[i] != b_data[i]) return 0;
  }
  return 1;
}
#ifndef TF_LITE_STATIC_MEMORY
// Allocates a TfLiteIntArray with room for `size` entries (uninitialized).
// Returns NULL if the computed allocation size is non-positive (negative
// `size` or arithmetic overflow) or if malloc fails. Free with
// TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size) {
  const int alloc_size = TfLiteIntArrayGetSizeInBytes(size);
  if (alloc_size <= 0) return NULL;
  TfLiteIntArray* array = (TfLiteIntArray*)malloc(alloc_size);
  if (array != NULL) {
    array->size = size;
  }
  return array;
}
// Returns a newly allocated deep copy of `src`, or NULL if `src` is NULL or
// allocation fails. Caller owns the result and frees it with
// TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src) {
  if (src == NULL) return NULL;
  TfLiteIntArray* copy = TfLiteIntArrayCreate(src->size);
  if (copy != NULL) {
    memcpy(copy->data, src->data, src->size * sizeof(int));
  }
  return copy;
}
void TfLiteIntArrayFree(TfLiteIntArray* a) { free(a); }
#endif // TF_LITE_STATIC_MEMORY
int TfLiteFloatArrayGetSizeInBytes(int size) {
static TfLiteFloatArray dummy;
return sizeof(dummy) + sizeof(dummy.data[0]) * size;
}
#ifndef TF_LITE_STATIC_MEMORY
// Allocates a TfLiteFloatArray with room for `size` entries (uninitialized).
// Returns NULL if malloc fails. Free with TfLiteFloatArrayFree().
//
// Fix: the original dereferenced the malloc result unconditionally
// (`ret->size = size;`), crashing on allocation failure. Guard it, matching
// TfLiteIntArrayCreate above.
TfLiteFloatArray* TfLiteFloatArrayCreate(int size) {
  TfLiteFloatArray* ret =
      (TfLiteFloatArray*)malloc(TfLiteFloatArrayGetSizeInBytes(size));
  if (ret != NULL) {
    ret->size = size;
  }
  return ret;
}
void TfLiteFloatArrayFree(TfLiteFloatArray* a) { free(a); }
// Frees the tensor's data buffer when (and only when) this struct owns it:
// kTfLiteDynamic and kTfLitePersistentRo buffers are heap blocks allocated by
// the runtime; all other allocation types point at memory owned elsewhere
// (arena, mmap, user allocation). Always clears the pointer afterwards.
void TfLiteTensorDataFree(TfLiteTensor* t) {
  const bool owns_buffer = t->allocation_type == kTfLiteDynamic ||
                           t->allocation_type == kTfLitePersistentRo;
  if (owns_buffer) {
    free(t->data.raw);
  }
  t->data.raw = NULL;
}
// Releases any quantization parameters owned by `quantization` and resets it
// to the kTfLiteNoQuantization state. Only affine quantization carries owned
// heap data (scale array, zero-point array, and the params struct itself).
// NOTE(review): assumes `params` is non-NULL whenever `type` is affine — true
// for runtime-built tensors; confirm for hand-constructed ones.
void TfLiteQuantizationFree(TfLiteQuantization* quantization) {
  if (quantization->type == kTfLiteAffineQuantization) {
    TfLiteAffineQuantization* affine =
        (TfLiteAffineQuantization*)quantization->params;
    if (affine->scale != NULL) {
      TfLiteFloatArrayFree(affine->scale);
      affine->scale = NULL;
    }
    if (affine->zero_point != NULL) {
      TfLiteIntArrayFree(affine->zero_point);
      affine->zero_point = NULL;
    }
    free(affine);
  }
  quantization->params = NULL;
  quantization->type = kTfLiteNoQuantization;
}
// Releases a TfLiteSparsity struct and every array it owns: traversal order,
// block map, and the per-dimension metadata (including the CSR segment/index
// arrays). Safe to call with NULL.
//
// Fix: the original loop copied each TfLiteDimensionMetadata by value, so the
// defensive `= NULL` assignments after freeing only cleared the local copy
// (dead stores) while the freed pointers stayed live in the real struct until
// the subsequent free(). Iterating through a pointer makes the clearing take
// effect; the set of free() calls is unchanged.
void TfLiteSparsityFree(TfLiteSparsity* sparsity) {
  if (sparsity == NULL) {
    return;
  }
  if (sparsity->traversal_order) {
    TfLiteIntArrayFree(sparsity->traversal_order);
    sparsity->traversal_order = NULL;
  }
  if (sparsity->block_map) {
    TfLiteIntArrayFree(sparsity->block_map);
    sparsity->block_map = NULL;
  }
  if (sparsity->dim_metadata) {
    for (int i = 0; i < sparsity->dim_metadata_size; i++) {
      TfLiteDimensionMetadata* metadata = &sparsity->dim_metadata[i];
      // Only the sparse-CSR format owns heap-allocated segment/index arrays.
      if (metadata->format == kTfLiteDimSparseCSR) {
        TfLiteIntArrayFree(metadata->array_segments);
        metadata->array_segments = NULL;
        TfLiteIntArrayFree(metadata->array_indices);
        metadata->array_indices = NULL;
      }
    }
    free(sparsity->dim_metadata);
    sparsity->dim_metadata = NULL;
  }
  free(sparsity);
}
// Frees every resource owned by tensor `t`: its data buffer (if owned), dims,
// dims_signature, quantization parameters, and sparsity metadata. The struct
// itself is not freed; all freed pointers are cleared.
void TfLiteTensorFree(TfLiteTensor* t) {
  TfLiteTensorDataFree(t);
  if (t->dims != NULL) {
    TfLiteIntArrayFree(t->dims);
  }
  t->dims = NULL;
  if (t->dims_signature != NULL) {
    // The const qualifier is only a read-only view for clients; the tensor
    // owns the allocation, so casting it away here to free is intentional.
    TfLiteIntArrayFree((TfLiteIntArray*)t->dims_signature);
  }
  t->dims_signature = NULL;
  TfLiteQuantizationFree(&t->quantization);
  TfLiteSparsityFree(t->sparsity);
  t->sparsity = NULL;
}
// Set all of a tensor's fields (and free any previously allocated data).
// `name`, `buffer`, `dims` and `allocation` are stored as-is, not copied;
// ownership follows `allocation_type` (see TfLiteTensorDataFree/TensorFree).
// The legacy `params` field is set from `quantization`, while the richer
// `quantization` struct is reset to the empty (kTfLiteNoQuantization) state.
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor) {
  // Release data/dims/quantization/sparsity owned by the old contents first.
  TfLiteTensorFree(tensor);
  tensor->type = type;
  tensor->name = name;
  tensor->dims = dims;
  tensor->params = quantization;
  tensor->data.raw = buffer;
  tensor->bytes = size;
  tensor->allocation_type = allocation_type;
  tensor->allocation = allocation;
  tensor->is_variable = is_variable;
  tensor->quantization.type = kTfLiteNoQuantization;
  tensor->quantization.params = NULL;
}
// Resize the allocated data of a dynamic (or persistent-RO) tensor to
// `num_bytes`. Tensors with any other allocation type are left untouched.
// On allocation failure, `data.raw` becomes NULL (callers already had to
// handle that, since the original code could also end up with NULL here).
//
// Fix: `tensor->data.raw = realloc(...)` leaked the old block whenever
// realloc failed; the old pointer was overwritten with NULL and lost. The
// old block is now freed explicitly on failure.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor) {
  if (tensor->allocation_type != kTfLiteDynamic &&
      tensor->allocation_type != kTfLitePersistentRo) {
    return;
  }
  // TODO(b/145340303): Tensor data should be aligned.
  if (!tensor->data.raw) {
    tensor->data.raw = malloc(num_bytes);
  } else if (num_bytes > tensor->bytes) {
    void* new_data = realloc(tensor->data.raw, num_bytes);
    if (new_data == NULL) {
      // realloc keeps the old block alive on failure; release it so it is
      // not leaked when the pointer below is overwritten.
      free(tensor->data.raw);
    }
    tensor->data.raw = new_data;
  }
  tensor->bytes = num_bytes;
}
#endif // TF_LITE_STATIC_MEMORY
// Returns a human-readable name for `type`, for error reporting. The switch
// has no `default:` case, which lets compilers warn when a new TfLiteType
// enum value lacks an entry here; unhandled values reach the fallback return.
const char* TfLiteTypeGetName(TfLiteType type) {
  switch (type) {
    case kTfLiteNoType:
      return "NOTYPE";
    case kTfLiteFloat32:
      return "FLOAT32";
    case kTfLiteInt16:
      return "INT16";
    case kTfLiteInt32:
      return "INT32";
    case kTfLiteUInt32:
      return "UINT32";
    case kTfLiteUInt8:
      return "UINT8";
    case kTfLiteInt8:
      return "INT8";
    case kTfLiteInt64:
      return "INT64";
    case kTfLiteUInt64:
      return "UINT64";
    case kTfLiteBool:
      return "BOOL";
    case kTfLiteComplex64:
      return "COMPLEX64";
    case kTfLiteComplex128:
      return "COMPLEX128";
    case kTfLiteString:
      return "STRING";
    case kTfLiteFloat16:
      return "FLOAT16";
    case kTfLiteFloat64:
      return "FLOAT64";
    case kTfLiteResource:
      return "RESOURCE";
    case kTfLiteVariant:
      return "VARIANT";
  }
  return "Unknown type";
}
// Builds a TfLiteDelegate with every callback unset and default flags — a
// safe starting point for custom delegate implementations.
//
// Fix: in C, an empty parameter list `()` declares a function taking
// *unspecified* arguments, not zero arguments; `(void)` states the intent and
// enables argument checking. (Compatible with a `TfLiteDelegateCreate();`
// declaration in the header.)
TfLiteDelegate TfLiteDelegateCreate(void) {
  TfLiteDelegate d = {
      .data_ = NULL,
      .Prepare = NULL,
      .CopyFromBufferHandle = NULL,
      .CopyToBufferHandle = NULL,
      .FreeBufferHandle = NULL,
      .flags = kTfLiteDelegateFlagsNone,
  };
  return d;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/c/common.c | C | apache-2.0 | 6,872 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file defines common C types and APIs for implementing operations,
// delegates and other constructs in TensorFlow Lite. The actual operations and
// delegates can be defined using C++, but the interface between the interpreter
// and the operations are C.
//
// Summary of abstractions
// TF_LITE_ENSURE - Self-sufficient error checking
// TfLiteStatus - Status reporting
// TfLiteIntArray - stores tensor shapes (dims),
// TfLiteContext - allows an op to access the tensors
// TfLiteTensor - tensor (a multidimensional array)
// TfLiteNode - a single node or operation
// TfLiteRegistration - the implementation of a conceptual operation.
// TfLiteDelegate - allows delegation of nodes to alternative backends.
//
// Some abstractions in this file are created and managed by Interpreter.
//
// NOTE: The order of values in these structs are "semi-ABI stable". New values
// should be added only to the end of structs and never reordered.
#ifndef TENSORFLOW_LITE_C_COMMON_H_
#define TENSORFLOW_LITE_C_COMMON_H_
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include "tensorflow/lite/c/c_api_types.h" // IWYU pragma: export
#ifdef __cplusplus
extern "C" {
#endif // __cplusplus
// The list of external context types known to TF Lite. This list exists solely
// to avoid conflicts and to ensure ops can share the external contexts they
// need. Access to the external contexts is controlled by one of the
// corresponding support files.
typedef enum TfLiteExternalContextType {
  kTfLiteEigenContext = 0,       // include eigen_support.h to use.
  kTfLiteGemmLowpContext = 1,    // include gemm_support.h to use.
  kTfLiteEdgeTpuContext = 2,     // Placeholder for Edge TPU support.
  kTfLiteCpuBackendContext = 3,  // include cpu_backend_context.h to use.
  kTfLiteMaxExternalContexts = 4  // Count sentinel, not a real context type.
} TfLiteExternalContextType;

// Forward declare so dependent structs and methods can reference these types
// prior to the struct definitions.
struct TfLiteContext;
struct TfLiteDelegate;
struct TfLiteRegistration;

// An external context is a collection of information unrelated to the TF Lite
// framework, but useful to a subset of the ops. TF Lite knows very little
// about the actual contexts, but it keeps a list of them, and is able to
// refresh them if configurations like the number of recommended threads
// change.
typedef struct TfLiteExternalContext {
  TfLiteExternalContextType type;
  // Callback invoked so the context can adapt to configuration changes
  // (e.g. a new recommended thread count). Returns kTfLiteOk on success.
  TfLiteStatus (*Refresh)(struct TfLiteContext* context);
} TfLiteExternalContext;
// Sentinel tensor index meaning "this optional input/output is absent".
#define kTfLiteOptionalTensor (-1)

// Fixed size list of integers. Used for dimensions and inputs/outputs tensor
// indices. The elements are stored inline after the header (flexible array
// member), so the struct is allocated in one block of
// TfLiteIntArrayGetSizeInBytes(size) bytes.
typedef struct TfLiteIntArray {
  int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
#if (!defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
     __GNUC_MINOR__ >= 1) ||                                      \
    defined(HEXAGON) ||                                           \
    (defined(__clang__) && __clang_major__ == 7 && __clang_minor__ == 1)
  int data[0];
#else
  int data[];
#endif
} TfLiteIntArray;

// Given the size (number of elements) in a TfLiteIntArray, calculate its size
// in bytes.
int TfLiteIntArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer, that you must free using TfLiteIntArrayFree().
TfLiteIntArray* TfLiteIntArrayCreate(int size);
#endif

// Check if two intarrays are equal. Returns 1 if they are equal, 0 otherwise.
int TfLiteIntArrayEqual(const TfLiteIntArray* a, const TfLiteIntArray* b);

// Check if an intarray equals an array. Returns 1 if equals, 0 otherwise.
int TfLiteIntArrayEqualsArray(const TfLiteIntArray* a, int b_size,
                              const int b_data[]);

#ifndef TF_LITE_STATIC_MEMORY
// Create a copy of an array passed as `src`.
// You are expected to free memory with TfLiteIntArrayFree
TfLiteIntArray* TfLiteIntArrayCopy(const TfLiteIntArray* src);

// Free memory of array `a`.
void TfLiteIntArrayFree(TfLiteIntArray* a);
#endif  // TF_LITE_STATIC_MEMORY
// Fixed size list of floats. Used for per-channel quantization. Elements are
// stored inline after the header (flexible array member), mirroring
// TfLiteIntArray above.
typedef struct TfLiteFloatArray {
  int size;
// gcc 6.1+ have a bug where flexible members aren't properly handled
// https://github.com/google/re2/commit/b94b7cd42e9f02673cd748c1ac1d16db4052514c
// This also applies to the toolchain used for Qualcomm Hexagon DSPs.
#if !defined(__clang__) && defined(__GNUC__) && __GNUC__ == 6 && \
    __GNUC_MINOR__ >= 1
  float data[0];
#else
  float data[];
#endif
} TfLiteFloatArray;

// Given the size (number of elements) in a TfLiteFloatArray, calculate its size
// in bytes.
int TfLiteFloatArrayGetSizeInBytes(int size);

#ifndef TF_LITE_STATIC_MEMORY
// Create an array of a given `size` (uninitialized entries).
// This returns a pointer, that you must free using TfLiteFloatArrayFree().
TfLiteFloatArray* TfLiteFloatArrayCreate(int size);

// Free memory of array `a`.
void TfLiteFloatArrayFree(TfLiteFloatArray* a);
#endif  // TF_LITE_STATIC_MEMORY
// Since we must not depend on any libraries, define a minimal subset of
// error macros while avoiding names that have pre-conceived meanings like
// assert and check.

// Try to make all reporting calls through TF_LITE_KERNEL_LOG rather than
// calling the context->ReportError function directly, so that message strings
// can be stripped out if the binary size needs to be severely optimized.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_KERNEL_LOG(context, ...)            \
  do {                                              \
    (context)->ReportError((context), __VA_ARGS__); \
  } while (false)

// Same as TF_LITE_KERNEL_LOG but tolerates a NULL `context` (logs nothing).
// NOTE(review): uses `nullptr` (and TF_LITE_ENSURE_NEAR below uses `auto` /
// `static_cast`), so these macros expand to valid code only in C++
// translation units — confirm no pure-C callers.
#define TF_LITE_MAYBE_KERNEL_LOG(context, ...)        \
  do {                                                \
    if ((context) != nullptr) {                       \
      (context)->ReportError((context), __VA_ARGS__); \
    }                                                 \
  } while (false)
#else  // TF_LITE_STRIP_ERROR_STRINGS
// Stripped builds: logging macros expand to nothing.
#define TF_LITE_KERNEL_LOG(context, ...)
#define TF_LITE_MAYBE_KERNEL_LOG(context, ...)
#endif  // TF_LITE_STRIP_ERROR_STRINGS

// Check whether value is true, and if not return kTfLiteError from
// the current function (and report the error string msg).
#define TF_LITE_ENSURE_MSG(context, value, msg)        \
  do {                                                 \
    if (!(value)) {                                    \
      TF_LITE_KERNEL_LOG((context), __FILE__ " " msg); \
      return kTfLiteError;                             \
    }                                                  \
  } while (0)

// Check whether the value `a` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
#define TF_LITE_ENSURE(context, a)                                      \
  do {                                                                  \
    if (!(a)) {                                                         \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s was not true.", __FILE__, \
                         __LINE__, #a);                                 \
      return kTfLiteError;                                              \
    }                                                                   \
  } while (0)

// Evaluate expression `a` once; propagate its status to the caller if it is
// not kTfLiteOk.
#define TF_LITE_ENSURE_STATUS(a) \
  do {                           \
    const TfLiteStatus s = (a);  \
    if (s != kTfLiteOk) {        \
      return s;                  \
    }                            \
  } while (0)

// Check whether the value `a == b` is true, and if not return kTfLiteError from
// the current function, while also reporting the location of the error.
// `a` and `b` may be evaluated more than once, so no side effects or
// extremely expensive computations should be done.
// NOTE: Use TF_LITE_ENSURE_TYPES_EQ if comparing TfLiteTypes.
#define TF_LITE_ENSURE_EQ(context, a, b)                                   \
  do {                                                                     \
    if ((a) != (b)) {                                                      \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%d != %d)", __FILE__, \
                         __LINE__, #a, #b, (a), (b));                      \
      return kTfLiteError;                                                 \
    }                                                                      \
  } while (0)

// Like TF_LITE_ENSURE_EQ, but formats the two operands via
// TfLiteTypeGetName for readable type-mismatch messages.
#define TF_LITE_ENSURE_TYPES_EQ(context, a, b)                             \
  do {                                                                     \
    if ((a) != (b)) {                                                      \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s != %s (%s != %s)", __FILE__, \
                         __LINE__, #a, #b, TfLiteTypeGetName(a),           \
                         TfLiteTypeGetName(b));                            \
      return kTfLiteError;                                                 \
    }                                                                      \
  } while (0)

// Check that |a - b| <= epsilon; returns kTfLiteError from the current
// function otherwise. `a` and `b` may be evaluated multiple times.
#define TF_LITE_ENSURE_NEAR(context, a, b, epsilon)                          \
  do {                                                                       \
    auto delta = ((a) > (b)) ? ((a) - (b)) : ((b) - (a));                    \
    if (delta > epsilon) {                                                   \
      TF_LITE_KERNEL_LOG((context), "%s:%d %s not near %s (%f != %f)",       \
                         __FILE__, __LINE__, #a, #b, static_cast<double>(a), \
                         static_cast<double>(b));                            \
      return kTfLiteError;                                                   \
    }                                                                        \
  } while (0)

// Evaluate `status` once; propagate it to the caller if it is not kTfLiteOk.
// (The `context` argument is currently unused.)
#define TF_LITE_ENSURE_OK(context, status) \
  do {                                     \
    const TfLiteStatus s = (status);       \
    if ((s) != kTfLiteOk) {                \
      return s;                            \
    }                                      \
  } while (0)
// Single-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex64 {
  float re, im;  // real and imaginary parts, respectively.
} TfLiteComplex64;

// Double-precision complex data type compatible with the C99 definition.
typedef struct TfLiteComplex128 {
  double re, im;  // real and imaginary parts, respectively.
} TfLiteComplex128;

// Half precision data type compatible with the C99 definition.
// Only the raw 16 bits are stored — presumably an IEEE 754 binary16 bit
// pattern; no arithmetic on it is defined in this header.
typedef struct TfLiteFloat16 {
  uint16_t data;
} TfLiteFloat16;

// Return the name of a given type, for error reporting purposes.
const char* TfLiteTypeGetName(TfLiteType type);

// SupportedQuantizationTypes.
typedef enum TfLiteQuantizationType {
  // No quantization.
  kTfLiteNoQuantization = 0,
  // Affine quantization (with support for per-channel quantization).
  // Corresponds to TfLiteAffineQuantization.
  kTfLiteAffineQuantization = 1,
} TfLiteQuantizationType;
// Structure specifying the quantization used by the tensor, if-any.
typedef struct TfLiteQuantization {
  // The type of quantization held by params.
  TfLiteQuantizationType type;
  // Holds an optional reference to a quantization param structure. The actual
  // type depends on the value of the `type` field (see the comment there for
  // the values and corresponding types). Owned by the tensor and released by
  // TfLiteQuantizationFree.
  void* params;
} TfLiteQuantization;

// Parameters for asymmetric quantization across a dimension (i.e per output
// channel quantization).
// quantized_dimension specifies which dimension the scales and zero_points
// correspond to.
// For a particular value in quantized_dimension, quantized values can be
// converted back to float using:
//     real_value = scale * (quantized_value - zero_point)
typedef struct TfLiteAffineQuantization {
  TfLiteFloatArray* scale;       // Per-channel (or single) scale factors.
  TfLiteIntArray* zero_point;    // Matching zero points.
  int32_t quantized_dimension;
} TfLiteAffineQuantization;
/* A union of pointers that points to memory for a given tensor.
 * All members alias the same underlying buffer; which one is meaningful is
 * determined by the tensor's `type` field. */
typedef union TfLitePtrUnion {
  /* Do not access these members directly, if possible, use
   * GetTensorData<TYPE>(tensor) instead, otherwise only access .data, as other
   * members are deprecated. */
  int32_t* i32;
  uint32_t* u32;
  int64_t* i64;
  uint64_t* u64;
  float* f;
  TfLiteFloat16* f16;
  double* f64;
  char* raw;
  const char* raw_const;
  uint8_t* uint8;
  bool* b;
  int16_t* i16;
  TfLiteComplex64* c64;
  TfLiteComplex128* c128;
  int8_t* int8;
  /* Only use this member. */
  void* data;
} TfLitePtrUnion;
// Memory allocation strategies. These determine who owns a tensor's buffer
// and when it may be freed (see TfLiteTensorDataFree in common.c).
//  * kTfLiteMmapRo: Read-only memory-mapped data, or data externally allocated.
//  * kTfLiteArenaRw: Arena allocated with no guarantees about persistence,
//        and available during eval.
//  * kTfLiteArenaRwPersistent: Arena allocated but persistent across eval, and
//        only available during eval.
//  * kTfLiteDynamic: Allocated during eval, or for string tensors.
//  * kTfLitePersistentRo: Allocated and populated during prepare. This is
//        useful for tensors that can be computed during prepare and treated
//        as constant inputs for downstream ops (also in prepare).
//  * kTfLiteCustom: Custom memory allocation provided by the user. See
//        TfLiteCustomAllocation below.
typedef enum TfLiteAllocationType {
  kTfLiteMemNone = 0,
  kTfLiteMmapRo,
  kTfLiteArenaRw,
  kTfLiteArenaRwPersistent,
  kTfLiteDynamic,
  kTfLitePersistentRo,
  kTfLiteCustom,
} TfLiteAllocationType;

// The delegates should use zero or positive integers to represent handles.
// -1 is reserved for the unallocated status.
typedef int TfLiteBufferHandle;
enum {
  kTfLiteNullBufferHandle = -1,
};
// Storage format of each dimension in a sparse tensor.
typedef enum TfLiteDimensionType {
  kTfLiteDimDense = 0,
  kTfLiteDimSparseCSR,
} TfLiteDimensionType;

// Metadata to encode each dimension in a sparse tensor.
typedef struct TfLiteDimensionMetadata {
  TfLiteDimensionType format;
  int dense_size;
  // The two arrays below are used (and owned) only when format is
  // kTfLiteDimSparseCSR; see TfLiteSparsityFree in common.c.
  TfLiteIntArray* array_segments;
  TfLiteIntArray* array_indices;
} TfLiteDimensionMetadata;

// Parameters used to encode a sparse tensor. For detailed explanation of each
// field please refer to lite/schema/schema.fbs.
typedef struct TfLiteSparsity {
  TfLiteIntArray* traversal_order;
  TfLiteIntArray* block_map;
  TfLiteDimensionMetadata* dim_metadata;  // Array of dim_metadata_size items.
  int dim_metadata_size;
} TfLiteSparsity;

// Defines a custom memory allocation not owned by the runtime.
// `data` should be aligned to kDefaultTensorAlignment defined in
// lite/util.h. (Currently 64 bytes)
// NOTE: See Interpreter.SetCustomAllocationForTensor for details on usage.
typedef struct TfLiteCustomAllocation {
  void* data;
  size_t bytes;
} TfLiteCustomAllocation;

// The flags used in `Interpreter::SetCustomAllocationForTensor`.
// Note that this is a bitmask, so the values should be 1, 2, 4, 8, ...etc.
typedef enum TfLiteCustomAllocationFlags {
  kTfLiteCustomAllocationFlagsNone = 0,
  // Skips checking whether allocation.data points to an aligned buffer as
  // expected by the TFLite runtime.
  // NOTE: Setting this flag can cause crashes when calling Invoke().
  // Use with caution.
  kTfLiteCustomAllocationFlagsSkipAlignCheck = 1,
} TfLiteCustomAllocationFlags;
// A tensor in the interpreter system which is a wrapper around a buffer of
// data including a dimensionality (or NULL if not currently defined).
#ifndef TF_LITE_STATIC_MEMORY
// A tensor in the interpreter system: a buffer of data plus its type, shape,
// quantization and allocation metadata. Field order is semi-ABI stable (see
// file header) — new fields go at the end only.
typedef struct TfLiteTensor {
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // Quantization information.
  TfLiteQuantizationParams params;
  // How memory is mapped
  //  kTfLiteMmapRo: Memory mapped read only.
  //  i.e. weights
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;
  // An opaque pointer to a tflite::MMapAllocation
  const void* allocation;
  // Null-terminated name of this tensor.
  const char* name;
  // The delegate which knows how to handle `buffer_handle`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;
  // An integer buffer handle that can be handled by `delegate`.
  // The value is valid only when delegate is not null.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteBufferHandle buffer_handle;
  // If the delegate uses its own buffer (e.g. GPU memory), the delegate is
  // responsible to set data_is_stale to true.
  // `delegate->CopyFromBufferHandle` can be called to copy the data from
  // delegate buffer.
  // WARNING: This is an experimental interface that is subject to change.
  bool data_is_stale;
  // True if the tensor is a variable.
  bool is_variable;
  // Quantization information. Replaces params field above.
  TfLiteQuantization quantization;
  // Parameters used to encode a sparse tensor.
  // This is optional. The field is NULL if a tensor is dense.
  // WARNING: This is an experimental interface that is subject to change.
  TfLiteSparsity* sparsity;
  // Optional. Encodes shapes with unknown dimensions with -1. This field is
  // only populated when unknown dimensions exist in a read-write tensor (i.e.
  // an input or output tensor). (e.g. `dims` contains [1, 1, 1, 3] and
  // `dims_signature` contains [1, -1, -1, 3]).
  const TfLiteIntArray* dims_signature;
} TfLiteTensor;
// A structure representing an instance of a node.
// This structure only exhibits the inputs, outputs, user defined data and some
// node properties (like statefulness), not other features like the type.
// A structure representing an instance of a node: its input/output tensor
// indices, user-defined data, and node properties (like statefulness).
typedef struct TfLiteNode {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;
  // Outputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;
  // Intermediate tensors to this node expressed as indices into the
  // simulator's tensors.
  TfLiteIntArray* intermediates;
  // Temporary tensors used during the computations. This usually contains no
  // tensors, but ops are allowed to change that if they need scratch space of
  // any sort.
  TfLiteIntArray* temporaries;
  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;
  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h
  void* builtin_data;
  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;
  // The pointer to the delegate. This is non-null only when the node is
  // created by calling `interpreter.ModifyGraphWithDelegate`.
  // WARNING: This is an experimental interface that is subject to change.
  struct TfLiteDelegate* delegate;
  // Whether this op might have side effect (e.g. stateful op).
  bool might_have_side_effect;
} TfLiteNode;
#else // defined(TF_LITE_STATIC_MEMORY)?
// NOTE: This flag is opt-in only at compile time.
//
// Specific reduced TfLiteTensor struct for TF Micro runtime. This struct
// contains only the minimum fields required to initialize and prepare a micro
// inference graph. The fields in this struct have been ordered from
// largest-to-smallest for optimal struct sizeof.
//
// This struct does not use:
// - allocation
// - buffer_handle
// - data_is_stale
// - delegate
// - dims_signature
// - name
// - sparsity
// Reduced TfLiteTensor for the TF Micro runtime (TF_LITE_STATIC_MEMORY
// builds): only the fields needed to initialize and prepare a micro inference
// graph, ordered largest-to-smallest to minimize padding.
typedef struct TfLiteTensor {
  // TODO(b/155784997): Consider consolidating these quantization fields:
  // Quantization information. Replaces params field above.
  TfLiteQuantization quantization;
  // Quantization information.
  TfLiteQuantizationParams params;
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have. NOTE: the product of elements of `dims`
  // and the element datatype size should be equal to `bytes` below.
  TfLiteIntArray* dims;
  // The number of bytes required to store the data of this Tensor. I.e.
  // (bytes of each element) * dims[0] * ... * dims[n-1].  For example, if
  // type is kTfLiteFloat32 and dims = {3, 2} then
  // bytes = sizeof(float) * 3 * 2 = 4 * 3 * 2 = 24.
  size_t bytes;
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
  // How memory is mapped
  //  kTfLiteMmapRo: Memory mapped read only.
  //  i.e. weights
  //  kTfLiteArenaRw: Arena allocated read write memory
  //  (i.e. temporaries, outputs).
  TfLiteAllocationType allocation_type;
  // True if the tensor is a variable.
  bool is_variable;
} TfLiteTensor;
// Specific reduced TfLiteNode struct for TF Micro runtime. This struct contains
// only the minimum fields required to represent a node.
//
// This struct does not use:
// - delegate
// - intermediates
// - temporaries
// Reduced TfLiteNode for the TF Micro runtime: only the minimum fields
// required to represent a node (no delegate/intermediates/temporaries).
typedef struct TfLiteNode {
  // Inputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* inputs;
  // Outputs to this node expressed as indices into the simulator's tensors.
  TfLiteIntArray* outputs;
  // Opaque data provided by the node implementer through `Registration.init`.
  void* user_data;
  // Opaque data provided to the node if the node is a builtin. This is usually
  // a structure defined in builtin_op_data.h
  void* builtin_data;
  // Custom initial data. This is the opaque data provided in the flatbuffer.
  // WARNING: This is an experimental interface that is subject to change.
  const void* custom_initial_data;
  int custom_initial_data_size;
} TfLiteNode;
#endif // TF_LITE_STATIC_MEMORY
// Light-weight tensor struct for TF Micro runtime. Provides the minimal amount
// of information required for a kernel to run during TfLiteRegistration::Eval:
// just the data pointer, shape, and element type.
// TODO(b/160955687): Move this field into TF_LITE_STATIC_MEMORY when TFLM
// builds with this flag by default internally.
typedef struct TfLiteEvalTensor {
  // A union of data pointers. The appropriate type should be used for a typed
  // tensor based on `type`.
  TfLitePtrUnion data;
  // A pointer to a structure representing the dimensionality interpretation
  // that the buffer should have.
  TfLiteIntArray* dims;
  // The data type specification for data stored in `data`. This affects
  // what member of `data` union should be used.
  TfLiteType type;
} TfLiteEvalTensor;
#ifndef TF_LITE_STATIC_MEMORY
// Free data memory of tensor `t` (only when the tensor owns it; see the
// allocation-type rules in the definition).
void TfLiteTensorDataFree(TfLiteTensor* t);

// Free quantization data.
void TfLiteQuantizationFree(TfLiteQuantization* quantization);

// Free sparsity parameters.
void TfLiteSparsityFree(TfLiteSparsity* sparsity);

// Free memory of tensor `t`.
void TfLiteTensorFree(TfLiteTensor* t);

// Set all of a tensor's fields (and free any previously allocated data).
void TfLiteTensorReset(TfLiteType type, const char* name, TfLiteIntArray* dims,
                       TfLiteQuantizationParams quantization, char* buffer,
                       size_t size, TfLiteAllocationType allocation_type,
                       const void* allocation, bool is_variable,
                       TfLiteTensor* tensor);

// Resize the allocated data of a (dynamic) tensor. Tensors with allocation
// types other than kTfLiteDynamic or kTfLitePersistentRo will be ignored.
void TfLiteTensorRealloc(size_t num_bytes, TfLiteTensor* tensor);
#endif  // TF_LITE_STATIC_MEMORY
// WARNING: This is an experimental interface that is subject to change.
//
// Currently, TfLiteDelegateParams has to be allocated in a way that it's
// trivially destructable. It will be stored as `builtin_data` field in
// `TfLiteNode` of the delegate node.
//
// See also the `CreateDelegateParams` function in `interpreter.cc` details.
typedef struct TfLiteDelegateParams {
  struct TfLiteDelegate* delegate;
  // Node indices this delegate kernel replaces, plus the tensor indices that
  // form its inputs and outputs.
  TfLiteIntArray* nodes_to_replace;
  TfLiteIntArray* input_tensors;
  TfLiteIntArray* output_tensors;
} TfLiteDelegateParams;
typedef struct TfLiteContext {
// Number of tensors in the context.
size_t tensors_size;
// The execution plan contains a list of the node indices in execution
// order. execution_plan->size is the current number of nodes. And,
// execution_plan->data[0] is the first node that needs to be run.
// TfLiteDelegates can traverse the current execution plan by iterating
// through each member of this array and using GetNodeAndRegistration() to
// access details about a node. i.e.
//
// TfLiteIntArray* execution_plan;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &execution_plan));
// for (int exec_index = 0; exec_index < execution_plan->size; exec_index++) {
// int node_index = execution_plan->data[exec_index];
// TfLiteNode* node;
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, ®);
// }
// Note: the memory pointed by '`*execution_plan` is OWNED by TfLite runtime.
// Future calls to GetExecutionPlan invalidates earlier outputs. The following
// code snippet shows the issue of such an invocation pattern. After calling
// CheckNode, subsequent access to `plan_1st` is undefined.
//
// void CheckNode(const TfLiteNode* node) {
// ...
// TfLiteIntArray* plan_2nd;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_2nd));
// ...
// }
//
// TfLiteIntArray* plan_1st;
// TF_LITE_ENSURE_STATUS(context->GetExecutionPlan(context, &plan_1st));
// for (int exec_index = 0; exec_index < plan_1st->size; exec_index++) {
// int node_index = plan_1st->data[exec_index];
// TfLiteNode* node;
// TfLiteRegistration* reg;
// context->GetNodeAndRegistration(context, node_index, &node, ®);
// CheckNode(node);
// }
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetExecutionPlan)(struct TfLiteContext* context,
TfLiteIntArray** execution_plan);
// An array of tensors in the interpreter context (of length `tensors_size`)
TfLiteTensor* tensors;
// opaque full context ptr (an opaque c++ data structure)
void* impl_;
// Request memory pointer be resized. Updates dimensions on the tensor.
// NOTE: ResizeTensor takes ownership of newSize.
TfLiteStatus (*ResizeTensor)(struct TfLiteContext*, TfLiteTensor* tensor,
TfLiteIntArray* new_size);
// Request that an error be reported with format string msg.
void (*ReportError)(struct TfLiteContext*, const char* msg, ...);
// Add `tensors_to_add` tensors, preserving pre-existing Tensor entries. If
// non-null, the value pointed to by `first_new_tensor_index` will be set to
// the index of the first new tensor.
TfLiteStatus (*AddTensors)(struct TfLiteContext*, int tensors_to_add,
int* first_new_tensor_index);
// Get a Tensor node by node_index.
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*GetNodeAndRegistration)(
struct TfLiteContext*, int node_index, TfLiteNode** node,
struct TfLiteRegistration** registration);
// Replace ops with one or more stub delegate operations. This function
// does not take ownership of `nodes_to_replace`.
TfLiteStatus (*ReplaceNodeSubsetsWithDelegateKernels)(
struct TfLiteContext*, struct TfLiteRegistration registration,
const TfLiteIntArray* nodes_to_replace, struct TfLiteDelegate* delegate);
// Number of threads that are recommended to subsystems like gemmlowp and
// eigen.
int recommended_num_threads;
// Access external contexts by type.
// WARNING: This is an experimental interface that is subject to change.
TfLiteExternalContext* (*GetExternalContext)(struct TfLiteContext*,
TfLiteExternalContextType);
  // Set the value of an external context. Does not take ownership of the
  // pointer.
// WARNING: This is an experimental interface that is subject to change.
void (*SetExternalContext)(struct TfLiteContext*, TfLiteExternalContextType,
TfLiteExternalContext*);
// Flag for allowing float16 precision for FP32 calculation.
// default: false.
// WARNING: This is an experimental API and subject to change.
bool allow_fp32_relax_to_fp16;
// Pointer to the op-level profiler, if set; nullptr otherwise.
void* profiler;
// Allocate persistent buffer which has the same life time as the interpreter.
// Returns nullptr on failure.
// The memory is allocated from heap for TFL, and from tail in TFLM.
// This method is only available in Init or Prepare stage.
// WARNING: This is an experimental interface that is subject to change.
void* (*AllocatePersistentBuffer)(struct TfLiteContext* ctx, size_t bytes);
// Allocate a buffer which will be deallocated right after invoke phase.
// The memory is allocated from heap in TFL, and from volatile arena in TFLM.
// This method is only available in invoke stage.
// NOTE: If possible use RequestScratchBufferInArena method to avoid memory
// allocation during inference time.
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*AllocateBufferForEval)(struct TfLiteContext* ctx, size_t bytes,
void** ptr);
// Request a scratch buffer in the arena through static memory planning.
// This method is only available in Prepare stage and the buffer is allocated
// by the interpreter between Prepare and Eval stage. In Eval stage,
// GetScratchBuffer API can be used to fetch the address.
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*RequestScratchBufferInArena)(struct TfLiteContext* ctx,
size_t bytes, int* buffer_idx);
// Get the scratch buffer pointer.
// This method is only available in Eval stage.
// WARNING: This is an experimental interface that is subject to change.
void* (*GetScratchBuffer)(struct TfLiteContext* ctx, int buffer_idx);
// Resize the memory pointer of the `tensor`. This method behaves the same as
// `ResizeTensor`, except that it makes a copy of the shape array internally
// so the shape array could be deallocated right afterwards.
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*ResizeTensorExplicit)(struct TfLiteContext* ctx,
TfLiteTensor* tensor, int dims,
const int* shape);
// This method provides a preview of post-delegation partitioning. Each
// TfLiteDelegateParams in the referenced array corresponds to one instance of
// the delegate kernel.
// Example usage:
//
// TfLiteIntArray* nodes_to_replace = ...;
// TfLiteDelegateParams* params_array;
// int num_partitions = 0;
// TF_LITE_ENSURE_STATUS(context->PreviewDelegatePartitioning(
// context, delegate, nodes_to_replace, ¶ms_array, &num_partitions));
// for (int idx = 0; idx < num_partitions; idx++) {
// const auto& partition_params = params_array[idx];
// ...
// }
//
// NOTE: The context owns the memory referenced by partition_params_array. It
// will be cleared with another call to PreviewDelegateParitioning, or after
// TfLiteDelegateParams::Prepare returns.
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus (*PreviewDelegatePartitioning)(
struct TfLiteContext* context, const TfLiteIntArray* nodes_to_replace,
TfLiteDelegateParams** partition_params_array, int* num_partitions);
// Returns a TfLiteTensor struct for a given index.
// WARNING: This is an experimental interface that is subject to change.
// WARNING: This method may not be available on all platforms.
TfLiteTensor* (*GetTensor)(const struct TfLiteContext* context,
int tensor_idx);
// Returns a TfLiteEvalTensor struct for a given index.
// WARNING: This is an experimental interface that is subject to change.
// WARNING: This method may not be available on all platforms.
TfLiteEvalTensor* (*GetEvalTensor)(const struct TfLiteContext* context,
int tensor_idx);
} TfLiteContext;
// Defines the implementation of a single TfLite operator: the lifecycle
// hooks (init/free/prepare/invoke) plus the metadata identifying the op.
typedef struct TfLiteRegistration {
  // Initializes the op from serialized data.
  // If a built-in op:
  //   `buffer` is the op's params data (TfLiteLSTMParams*).
  //   `length` is zero.
  // If custom op:
  //   `buffer` is the op's `custom_options`.
  //   `length` is the size of the buffer.
  //
  // Returns a type-punned (i.e. void*) opaque data (e.g. a primitive pointer
  // or an instance of a struct).
  //
  // The returned pointer will be stored with the node in the `user_data` field,
  // accessible within prepare and invoke functions below.
  // NOTE: if the data is already in the desired format, simply implement this
  // function to return `nullptr` and implement the free function to be a no-op.
  void* (*init)(TfLiteContext* context, const char* buffer, size_t length);
  // Releases the per-node data created by `init`. The pointer `buffer` is the
  // data previously returned by an init invocation.
  void (*free)(TfLiteContext* context, void* buffer);
  // prepare is called when the inputs this node depends on have been resized.
  // context->ResizeTensor() can be called to request output tensors to be
  // resized.
  //
  // Returns kTfLiteOk on success.
  TfLiteStatus (*prepare)(TfLiteContext* context, TfLiteNode* node);
  // Execute the node (should read node->inputs and output to node->outputs).
  // Returns kTfLiteOk on success.
  TfLiteStatus (*invoke)(TfLiteContext* context, TfLiteNode* node);
  // profiling_string is called during summarization of profiling information
  // in order to group executions together. Providing a value here will cause a
  // given op to appear multiple times in the profiling report. This is
  // particularly useful for custom ops that can perform significantly
  // different calculations depending on their `user-data`.
  const char* (*profiling_string)(const TfLiteContext* context,
                                  const TfLiteNode* node);
  // Builtin codes. If this kernel refers to a builtin this is the code
  // of the builtin. This is so we can do marshaling to other frameworks like
  // NN API.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int32_t builtin_code;
  // Custom op name. If the op is a builtin, this will be null.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  // WARNING: This is an experimental interface that is subject to change.
  const char* custom_name;
  // The version of the op.
  // Note: It is the responsibility of the registration binder to set this
  // properly.
  int version;
} TfLiteRegistration;
// The flags used in `TfLiteDelegate`. Note that this is a bitmask, so the
// values should be 1, 2, 4, 8, ...etc.
typedef enum TfLiteDelegateFlags {
  kTfLiteDelegateFlagsNone = 0,
  // The flag is set if the delegate can handle dynamic sized tensors.
  // For example, the output shape of a `Resize` op with non-constant shape
  // can only be inferred when the op is invoked.
  // In this case, the Delegate is responsible for calling
  // `SetTensorToDynamic` to mark the tensor as a dynamic tensor, and calling
  // `ResizeTensor` when invoking the op.
  //
  // If the delegate isn't capable of handling dynamic tensors, this flag
  // needs to be set to false.
  kTfLiteDelegateFlagsAllowDynamicTensors = 1,
  // This flag can be used by delegates (that allow dynamic tensors) to ensure
  // applicable tensor shapes are automatically propagated in the case of tensor
  // resizing.
  // This means that non-dynamic (allocation_type != kTfLiteDynamic) I/O tensors
  // of a delegate kernel will have correct shapes before its Prepare() method
  // is called. The runtime leverages TFLite builtin ops in the original
  // execution plan to propagate shapes.
  //
  // A few points to note:
  // 1. This requires kTfLiteDelegateFlagsAllowDynamicTensors. If that flag is
  // false, this one is redundant since the delegate kernels are re-initialized
  // every time tensors are resized.
  // 2. Enabling this flag adds some overhead to AllocateTensors(), since extra
  // work is required to prepare the original execution plan.
  // 3. This flag requires that the original execution plan only have ops with
  // valid registrations (and not 'dummy' custom ops like with Flex).
  // WARNING: This feature is experimental and subject to change.
  kTfLiteDelegateFlagsRequirePropagatedShapes = 2
} TfLiteDelegateFlags;
// A delegate: a pluggable backend that can claim and execute subsets of the
// graph on behalf of the runtime.
// WARNING: This is an experimental interface that is subject to change.
typedef struct TfLiteDelegate {
  // Data that delegate needs to identify itself. This data is owned by the
  // delegate. The delegate is owned in the user code, so the delegate is
  // responsible for deallocating this data when it is destroyed.
  void* data_;
  // Invoked by ModifyGraphWithDelegate. This prepare is called, giving the
  // delegate a view of the current graph through TfLiteContext*. It typically
  // will look at the nodes and call ReplaceNodeSubsetsWithDelegateKernels()
  // to ask the TensorFlow Lite runtime to create macro-nodes to represent
  // delegated subgraphs of the original graph.
  TfLiteStatus (*Prepare)(TfLiteContext* context,
                          struct TfLiteDelegate* delegate);
  // Copy the data from delegate buffer handle into raw memory of the given
  // 'tensor'. Note that the delegate is allowed to allocate the raw bytes as
  // long as it follows the rules for kTfLiteDynamic tensors, in which case this
  // cannot be null.
  TfLiteStatus (*CopyFromBufferHandle)(TfLiteContext* context,
                                       struct TfLiteDelegate* delegate,
                                       TfLiteBufferHandle buffer_handle,
                                       TfLiteTensor* tensor);
  // Copy the data from raw memory of the given 'tensor' to delegate buffer
  // handle. This can be null if the delegate doesn't use its own buffer.
  TfLiteStatus (*CopyToBufferHandle)(TfLiteContext* context,
                                     struct TfLiteDelegate* delegate,
                                     TfLiteBufferHandle buffer_handle,
                                     TfLiteTensor* tensor);
  // Free the Delegate Buffer Handle. Note: This only frees the handle, but
  // this doesn't release the underlying resource (e.g. textures). The
  // resources are either owned by application layer or the delegate.
  // This can be null if the delegate doesn't use its own buffer.
  void (*FreeBufferHandle)(TfLiteContext* context,
                           struct TfLiteDelegate* delegate,
                           TfLiteBufferHandle* handle);
  // Bitmask flags. See the comments in `TfLiteDelegateFlags`.
  int64_t flags;
} TfLiteDelegate;
// Build a 'null' delegate, with all the fields properly set to their default
// values.
// NOTE: `(void)` makes this a proper C prototype. With empty parentheses a
// pre-C23 C compiler treats the parameter list as unspecified and performs no
// argument checking at call sites; in C++ the two spellings are equivalent,
// so this stays compatible with the extern "C" usage of this header.
TfLiteDelegate TfLiteDelegateCreate(void);
#ifdef __cplusplus
} // extern "C"
#endif // __cplusplus
#endif // TENSORFLOW_LITE_C_COMMON_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/c/common.h | C | apache-2.0 | 40,819 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Compatibility shim for moved header location.
#ifndef TENSORFLOW_LITE_CONTEXT_H_
#define TENSORFLOW_LITE_CONTEXT_H_
#include "tensorflow/lite/c/common.h"
#endif // TENSORFLOW_LITE_CONTEXT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/context.h | C | apache-2.0 | 866 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This provides a few C++ helpers that are useful for manipulating C structures
// in C++.
#ifndef TENSORFLOW_LITE_CONTEXT_UTIL_H_
#define TENSORFLOW_LITE_CONTEXT_UTIL_H_
#include <stddef.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
// Provide a range iterable wrapper for TfLiteIntArray* (C lists that TfLite
// C api uses. Can't use the google array_view, since we can't depend on even
// absl for embedded device reasons.
class TfLiteIntArrayView {
public:
// Construct a view of a TfLiteIntArray*. Note, `int_array` should be non-null
// and this view does not take ownership of it.
explicit TfLiteIntArrayView(const TfLiteIntArray* int_array)
: int_array_(int_array) {}
TfLiteIntArrayView(const TfLiteIntArrayView&) = default;
TfLiteIntArrayView& operator=(const TfLiteIntArrayView& rhs) = default;
typedef const int* const_iterator;
const_iterator begin() const { return int_array_->data; }
const_iterator end() const { return &int_array_->data[int_array_->size]; }
size_t size() const { return end() - begin(); }
int operator[](size_t pos) const { return int_array_->data[pos]; }
private:
const TfLiteIntArray* int_array_;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_CONTEXT_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/context_util.h | C++ | apache-2.0 | 1,923 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/core/api/error_reporter.h"
#include <cstdarg>
namespace tflite {
// Variadic entry point: packages the trailing arguments into a va_list and
// hands them to the subclass-provided Report(format, va_list) overload.
// Returns whatever that overload returns.
int ErrorReporter::Report(const char* format, ...) {
  va_list arguments;
  va_start(arguments, format);
  const int status = Report(format, arguments);
  va_end(arguments);
  return status;
}
// TODO(aselle): Make the name of ReportError on context the same, so
// we can use the ensure functions w/o a context and w/ a reporter.
// Context-style entry point: the leading void* argument is accepted only for
// signature compatibility and ignored; the format and trailing arguments are
// forwarded to the va_list overload of Report.
int ErrorReporter::ReportError(void*, const char* format, ...) {
  va_list arguments;
  va_start(arguments, format);
  const int status = Report(format, arguments);
  va_end(arguments);
  return status;
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/error_reporter.cc | C++ | apache-2.0 | 1,263 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
#define TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
#include <cstdarg>
namespace tflite {
/// A functor that reports error to supporting system. Invoked similar to
/// printf.
///
/// Usage:
/// ErrorReporter foo;
/// foo.Report("test %d", 5);
/// or
/// va_list args;
/// foo.Report("test %d", args); // where args is va_list
///
/// Subclass ErrorReporter to provide another reporting destination.
/// For example, if you have a GUI program, you might redirect to a buffer
/// that drives a GUI error log box.
class ErrorReporter {
 public:
  virtual ~ErrorReporter() {}
  // Core reporting primitive with vprintf-style semantics; subclasses
  // override this to route messages to their destination.
  virtual int Report(const char* format, va_list args) = 0;
  // Variadic convenience overload; forwards to the va_list overload above.
  int Report(const char* format, ...);
  // Variant whose first argument is accepted and ignored (see the TODO in
  // error_reporter.cc); otherwise identical to the variadic Report.
  int ReportError(void*, const char* format, ...);
};
} // namespace tflite
// You should not make bare calls to the error reporter, instead use the
// TF_LITE_REPORT_ERROR macro, since this allows message strings to be
// stripped when the binary size has to be optimized. If you are looking to
// reduce binary size, define TF_LITE_STRIP_ERROR_STRINGS when compiling and
// every call will be stubbed out, taking no memory.
#ifndef TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_REPORT_ERROR(reporter, ...) \
do { \
static_cast<tflite::ErrorReporter*>(reporter)->Report(__VA_ARGS__); \
} while (false)
#else // TF_LITE_STRIP_ERROR_STRINGS
#define TF_LITE_REPORT_ERROR(reporter, ...)
#endif // TF_LITE_STRIP_ERROR_STRINGS
#endif // TENSORFLOW_LITE_CORE_API_ERROR_REPORTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/error_reporter.h | C++ | apache-2.0 | 2,279 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/core/api/flatbuffer_conversions.h"
#include <cstddef>
#include <cstdint>
#include <memory>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
namespace {
// Utility class for safely allocating POD data. This is useful for avoiding
// leaks in cases where op params are allocated but fail to propagate to the
// parsed op data (e.g., when model parameters are invalid). Allocations are
// handed out as std::unique_ptr, so they flow back through the allocator's
// Deallocate unless the caller takes ownership via release().
class SafeBuiltinDataAllocator {
 public:
  // unique_ptr deleter that returns memory to the BuiltinDataAllocator that
  // produced it instead of calling the global delete.
  class BuiltinDataDeleter {
   public:
    explicit BuiltinDataDeleter(BuiltinDataAllocator* allocator)
        : allocator_(allocator) {}
    void operator()(void* data) { allocator_->Deallocate(data); }
   private:
    BuiltinDataAllocator* allocator_;
  };
  // Owning pointer to a T allocated through the wrapped allocator.
  template <typename T>
  using BuiltinDataPtr = std::unique_ptr<T, BuiltinDataDeleter>;
  // Does not take ownership of `allocator`, which must outlive this object.
  explicit SafeBuiltinDataAllocator(BuiltinDataAllocator* allocator)
      : allocator_(allocator) {}
  // Allocates a T via the allocator's AllocatePOD<T>() and wraps it in a
  // BuiltinDataPtr so it is freed automatically on scope exit.
  template <typename T>
  BuiltinDataPtr<T> Allocate() {
    return BuiltinDataPtr<T>(allocator_->AllocatePOD<T>(),
                             BuiltinDataDeleter(allocator_));
  }
 private:
  BuiltinDataAllocator* allocator_;
};
// All the Parse functions take some pointers as params and this function has
// the common DCHECKs to catch if any of those are nullptr.
void CheckParsePointerParams(const Operator* op, ErrorReporter* error_reporter,
                             BuiltinDataAllocator* allocator,
                             void** builtin_data) {
  // A null argument here is a caller bug rather than a malformed model, hence
  // TFLITE_DCHECK instead of returning an error status.
  TFLITE_DCHECK(op != nullptr);
  TFLITE_DCHECK(error_reporter != nullptr);
  TFLITE_DCHECK(allocator != nullptr);
  TFLITE_DCHECK(builtin_data != nullptr);
}
// Copies the contents of the flatbuffer int vector `flat_vector` into the
// caller-provided int array `buffer` (capacity `max_size_of_buffer` bytes).
// Reports through `error_reporter` (tagging the message with `op_name`) and
// returns kTfLiteError when the vector is missing or too large; otherwise
// fills `buffer` and returns kTfLiteOk.
TfLiteStatus FlatBufferIntVectorToArray(
    int max_size_of_buffer, const flatbuffers::Vector<int32_t>* flat_vector,
    int* buffer, ErrorReporter* error_reporter, const char* op_name) {
  // Guard clause: the model did not supply the expected vector at all.
  if (!flat_vector) {
    TF_LITE_REPORT_ERROR(error_reporter,
                         "Input array not provided for operation '%s'.\n",
                         op_name);
    return kTfLiteError;
  }
  // Guard clause: the vector would overflow the destination buffer.
  const size_t num_dimensions = flat_vector->size();
  if (num_dimensions > max_size_of_buffer / sizeof(int)) {
    TF_LITE_REPORT_ERROR(
        error_reporter,
        "Found too many dimensions in the input array of operation '%s'.\n",
        op_name);
    return kTfLiteError;
  }
  for (size_t i = 0; i < num_dimensions; ++i) {
    buffer[i] = flat_vector->Get(i);
  }
  return kTfLiteOk;
}
// Maps a schema ActivationFunctionType onto the runtime's
// TfLiteFusedActivation enum. ActivationFunctionType_NONE and any
// unrecognized value both map to kTfLiteActNone.
TfLiteFusedActivation ConvertActivation(ActivationFunctionType activation) {
  if (activation == ActivationFunctionType_RELU) return kTfLiteActRelu;
  if (activation == ActivationFunctionType_RELU_N1_TO_1) {
    return kTfLiteActReluN1To1;
  }
  if (activation == ActivationFunctionType_RELU6) return kTfLiteActRelu6;
  if (activation == ActivationFunctionType_TANH) return kTfLiteActTanh;
  if (activation == ActivationFunctionType_SIGN_BIT) return kTfLiteActSignBit;
  return kTfLiteActNone;
}
// Translates the schema Padding enum into the runtime TfLitePadding value;
// any unrecognized value maps to kTfLitePaddingUnknown.
TfLitePadding ConvertPadding(Padding padding) {
  if (padding == Padding_SAME) return kTfLitePaddingSame;
  if (padding == Padding_VALID) return kTfLitePaddingValid;
  return kTfLitePaddingUnknown;
}
#ifndef TF_LITE_STATIC_MEMORY
TfLiteStatus ParseOpDataTfLite(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data) {
auto parseLSHProjectionType = [](LSHProjectionType type) {
switch (type) {
case LSHProjectionType_SPARSE:
return kTfLiteLshProjectionSparse;
case LSHProjectionType_DENSE:
return kTfLiteLshProjectionDense;
default:
return kTfLiteLshProjectionUnknown;
}
};
auto parseCombinerType = [](CombinerType type) {
switch (type) {
case CombinerType_MEAN:
return kTfLiteCombinerTypeMean;
case CombinerType_SQRTN:
return kTfLiteCombinerTypeSqrtn;
case CombinerType_SUM:
default:
return kTfLiteCombinerTypeSum;
}
};
SafeBuiltinDataAllocator safe_allocator(allocator);
*builtin_data = nullptr;
switch (op_type) {
case BuiltinOperator_ABS: {
return ParseAbs(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD: {
return ParseAdd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ADD_N: {
return ParseAddN(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MAX: {
return ParseArgMax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ARG_MIN: {
return ParseArgMin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_AVERAGE_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_MATMUL: {
return ParseBatchMatMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_BATCH_TO_SPACE_ND: {
return ParseBatchToSpaceNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CEIL: {
return ParseCeil(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONCATENATION: {
return ParseConcatenation(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CONV_2D: {
return ParseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CUMSUM: {
return ParseCumsum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTH_TO_SPACE: {
return ParseDepthToSpace(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEPTHWISE_CONV_2D: {
return ParseDepthwiseConv2D(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DEQUANTIZE: {
return ParseDequantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_DIV: {
return ParseDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ELU: {
return ParseElu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXP: {
return ParseExp(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_EXPAND_DIMS: {
return ParseExpandDims(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FILL: {
return ParseFill(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR: {
return ParseFloor(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_DIV: {
return ParseFloorDiv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FLOOR_MOD: {
return ParseFloorMod(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_FULLY_CONNECTED: {
return ParseFullyConnected(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GATHER_ND: {
return ParseGatherNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER: {
return ParseGreater(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_GREATER_EQUAL: {
return ParseGreaterEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_HARD_SWISH: {
return ParseHardSwish(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_NORMALIZATION: {
return ParseL2Normalization(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_L2_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LEAKY_RELU: {
return ParseLeakyRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS: {
return ParseLess(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LESS_EQUAL: {
return ParseLessEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG: {
return ParseLog(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_AND: {
return ParseLogicalAnd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_NOT: {
return ParseLogicalNot(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGICAL_OR: {
return ParseLogicalOr(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOGISTIC: {
return ParseLogistic(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LOG_SOFTMAX: {
return ParseLogSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAXIMUM: {
return ParseMaximum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MAX_POOL_2D: {
return ParsePool(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MEAN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MINIMUM: {
return ParseMinimum(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_MUL: {
return ParseMul(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NEG: {
return ParseNeg(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_NOT_EQUAL: {
return ParseNotEqual(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PACK: {
return ParsePack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PAD: {
return ParsePad(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PADV2: {
return ParsePadV2(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_POW: {
return ParsePow(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_PRELU: {
return ParsePrelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_QUANTIZE: {
return ParseQuantize(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ANY: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_ALL: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MAX: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_MIN: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_REDUCE_PROD: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU: {
return ParseRelu(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RELU6: {
return ParseRelu6(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESHAPE: {
return ParseReshape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_BILINEAR: {
return ParseResizeBilinear(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RESIZE_NEAREST_NEIGHBOR: {
return ParseResizeNearestNeighbor(op, error_reporter, allocator,
builtin_data);
}
case BuiltinOperator_ROUND: {
return ParseRound(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_RSQRT: {
return ParseRsqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SHAPE: {
return ParseShape(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SIN: {
return ParseSin(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SOFTMAX: {
return ParseSoftmax(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_BATCH_ND: {
return ParseSpaceToBatchNd(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPACE_TO_DEPTH: {
return ParseSpaceToDepth(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT: {
return ParseSplit(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPLIT_V: {
return ParseSplitV(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQRT: {
return ParseSqrt(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUARE: {
return ParseSquare(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SQUEEZE: {
return ParseSqueeze(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_STRIDED_SLICE: {
return ParseStridedSlice(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUB: {
return ParseSub(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SUM: {
return ParseReducer(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SVDF: {
return ParseSvdf(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TANH: {
return ParseTanh(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_TRANSPOSE_CONV: {
return ParseTransposeConv(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_UNPACK: {
return ParseUnpack(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_ZEROS_LIKE: {
return ParseZerosLike(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_CAST: {
return ParseCast(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_LSH_PROJECTION: {
auto params = safe_allocator.Allocate<TfLiteLSHProjectionParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lshParams =
op->builtin_options_as_LSHProjectionOptions()) {
params->type = parseLSHProjectionType(lshParams->type());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_RNN: {
auto params = safe_allocator.Allocate<TfLiteSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sequence_rnn_params =
op->builtin_options_as_SequenceRNNOptions()) {
params->activation =
ConvertActivation(sequence_rnn_params->fused_activation_function());
params->time_major = sequence_rnn_params->time_major();
params->asymmetric_quantize_inputs =
sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_RNN: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_sequence_rnn_params =
op->builtin_options_as_BidirectionalSequenceRNNOptions()) {
params->activation = ConvertActivation(
bidi_sequence_rnn_params->fused_activation_function());
params->time_major = bidi_sequence_rnn_params->time_major();
params->merge_outputs = bidi_sequence_rnn_params->merge_outputs();
params->asymmetric_quantize_inputs =
bidi_sequence_rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_RNN: {
auto params = safe_allocator.Allocate<TfLiteRNNParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* rnn_params = op->builtin_options_as_RNNOptions()) {
params->activation =
ConvertActivation(rnn_params->fused_activation_function());
params->asymmetric_quantize_inputs =
rnn_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_EMBEDDING_LOOKUP_SPARSE: {
auto params =
safe_allocator.Allocate<TfLiteEmbeddingLookupSparseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* embedding_params =
op->builtin_options_as_EmbeddingLookupSparseOptions()) {
params->combiner = parseCombinerType(embedding_params->combiner());
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE_LOOKUP:
// no-op.
return kTfLiteOk;
case BuiltinOperator_LOCAL_RESPONSE_NORMALIZATION: {
auto params = safe_allocator.Allocate<TfLiteLocalResponseNormParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_LocalResponseNormalizationOptions()) {
params->radius = schema_params->radius();
params->bias = schema_params->bias();
params->alpha = schema_params->alpha();
params->beta = schema_params->beta();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_LSTM: {
auto params = safe_allocator.Allocate<TfLiteLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* lstm_params = op->builtin_options_as_LSTMOptions()) {
params->activation =
ConvertActivation(lstm_params->fused_activation_function());
params->cell_clip = lstm_params->cell_clip();
params->proj_clip = lstm_params->proj_clip();
switch (lstm_params->kernel_type()) {
case LSTMKernelType_FULL:
params->kernel_type = kTfLiteLSTMFullKernel;
break;
case LSTMKernelType_BASIC:
params->kernel_type = kTfLiteLSTMBasicKernel;
break;
default:
TF_LITE_REPORT_ERROR(error_reporter,
"Unhandled LSTM kernel type: %d",
lstm_params->kernel_type());
return kTfLiteError;
}
params->asymmetric_quantize_inputs =
lstm_params->asymmetric_quantize_inputs();
} else {
TF_LITE_REPORT_ERROR(error_reporter,
"No valid LSTM builtin options exist");
return kTfLiteError;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
safe_allocator.Allocate<TfLiteUnidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* seq_lstm_params =
op->builtin_options_as_UnidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(seq_lstm_params->fused_activation_function());
params->cell_clip = seq_lstm_params->cell_clip();
params->proj_clip = seq_lstm_params->proj_clip();
params->time_major = seq_lstm_params->time_major();
params->asymmetric_quantize_inputs =
seq_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_BIDIRECTIONAL_SEQUENCE_LSTM: {
auto params =
safe_allocator.Allocate<TfLiteBidirectionalSequenceLSTMParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* bidi_lstm_params =
op->builtin_options_as_BidirectionalSequenceLSTMOptions()) {
params->activation =
ConvertActivation(bidi_lstm_params->fused_activation_function());
params->cell_clip = bidi_lstm_params->cell_clip();
params->proj_clip = bidi_lstm_params->proj_clip();
params->merge_outputs = bidi_lstm_params->merge_outputs();
params->time_major = bidi_lstm_params->time_major();
params->asymmetric_quantize_inputs =
bidi_lstm_params->asymmetric_quantize_inputs();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_SKIP_GRAM: {
auto params = safe_allocator.Allocate<TfLiteSkipGramParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* skip_gram_params =
op->builtin_options_as_SkipGramOptions()) {
params->ngram_size = skip_gram_params->ngram_size();
params->max_skip_size = skip_gram_params->max_skip_size();
params->include_all_ngrams = skip_gram_params->include_all_ngrams();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_GATHER: {
return ParseGather(op, error_reporter, allocator, builtin_data);
}
case BuiltinOperator_SPARSE_TO_DENSE: {
auto params = safe_allocator.Allocate<TfLiteSparseToDenseParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* sparse_to_dense_params =
op->builtin_options_as_SparseToDenseOptions()) {
params->validate_indices = sparse_to_dense_params->validate_indices();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_DELEGATE: {
TF_LITE_REPORT_ERROR(error_reporter,
"DELEGATE op shouldn't exist in model.");
return kTfLiteError;
}
case BuiltinOperator_FAKE_QUANT: {
auto params = safe_allocator.Allocate<TfLiteFakeQuantParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params =
op->builtin_options_as_FakeQuantOptions()) {
params->min = schema_params->min();
params->max = schema_params->max();
params->num_bits = schema_params->num_bits();
params->narrow_range = schema_params->narrow_range();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_ONE_HOT: {
auto params = safe_allocator.Allocate<TfLiteOneHotParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* schema_params = op->builtin_options_as_OneHotOptions()) {
params->axis = schema_params->axis();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_MIRROR_PAD: {
auto params = safe_allocator.Allocate<TfLiteMirrorPaddingParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* mirror_pad_params = op->builtin_options_as_MirrorPadOptions();
if (mirror_pad_params != nullptr) {
params->mode =
mirror_pad_params->mode() == tflite::MirrorPadMode_REFLECT
? TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect
: TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingSymmetric;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_UNIQUE: {
auto params = safe_allocator.Allocate<TfLiteUniqueParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
const auto* unique_params = op->builtin_options_as_UniqueOptions();
if (unique_params != nullptr) {
params->index_out_type =
unique_params->idx_out_type() == tflite::TensorType_INT64
? TfLiteType::kTfLiteInt64
: TfLiteType::kTfLiteInt32;
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_REVERSE_SEQUENCE: {
auto params = safe_allocator.Allocate<TfLiteReverseSequenceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* reverse_seq_params =
op->builtin_options_as_ReverseSequenceOptions()) {
params->seq_dim = reverse_seq_params->seq_dim();
params->batch_dim = reverse_seq_params->batch_dim();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_IF: {
auto params = safe_allocator.Allocate<TfLiteIfParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* if_params = op->builtin_options_as_IfOptions()) {
params->then_subgraph_index = if_params->then_subgraph_index();
params->else_subgraph_index = if_params->else_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_WHILE: {
auto params = safe_allocator.Allocate<TfLiteWhileParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* while_params = op->builtin_options_as_WhileOptions()) {
params->cond_subgraph_index = while_params->cond_subgraph_index();
params->body_subgraph_index = while_params->body_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CALL_ONCE: {
auto params = safe_allocator.Allocate<TfLiteCallOnceParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* call_once_params =
op->builtin_options_as_CallOnceOptions()) {
params->init_subgraph_index = call_once_params->init_subgraph_index();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_CONV_3D: {
auto params = safe_allocator.Allocate<TfLiteConv3DParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* conv3d_params = op->builtin_options_as_Conv3DOptions()) {
params->padding = ConvertPadding(conv3d_params->padding());
params->activation =
ConvertActivation(conv3d_params->fused_activation_function());
params->stride_depth = conv3d_params->stride_d();
params->stride_height = conv3d_params->stride_h();
params->stride_width = conv3d_params->stride_w();
params->dilation_depth_factor = conv3d_params->dilation_d_factor();
params->dilation_height_factor = conv3d_params->dilation_h_factor();
params->dilation_width_factor = conv3d_params->dilation_w_factor();
}
*builtin_data = params.release();
return kTfLiteOk;
}
case BuiltinOperator_HASHTABLE: {
auto params = safe_allocator.Allocate<TfLiteHashtableParams>();
TF_LITE_ENSURE(error_reporter, params != nullptr);
if (const auto* hashtable_params =
op->builtin_options_as_HashtableOptions()) {
params->table_id = hashtable_params->table_id();
TF_LITE_ENSURE_STATUS(ConvertTensorType(
hashtable_params->key_dtype(), ¶ms->key_dtype, error_reporter));
TF_LITE_ENSURE_STATUS(ConvertTensorType(hashtable_params->value_dtype(),
¶ms->value_dtype,
error_reporter));
}
*builtin_data = params.release();
return kTfLiteOk;
}
// Below are the ops with no builtin_data structure.
// TODO(aselle): Implement call in BuiltinOptions, but nullptrs are
// ok for now, since there is no call implementation either.
case BuiltinOperator_CALL:
case BuiltinOperator_CONCAT_EMBEDDINGS:
case BuiltinOperator_COS:
case BuiltinOperator_CUSTOM:
case BuiltinOperator_EMBEDDING_LOOKUP:
case BuiltinOperator_EQUAL:
case BuiltinOperator_MATRIX_DIAG:
case BuiltinOperator_MATRIX_SET_DIAG:
case BuiltinOperator_RELU_N1_TO_1:
case BuiltinOperator_SELECT:
case BuiltinOperator_SELECT_V2:
case BuiltinOperator_SLICE:
case BuiltinOperator_TILE:
case BuiltinOperator_TOPK_V2:
case BuiltinOperator_TRANSPOSE:
case BuiltinOperator_RANGE:
case BuiltinOperator_SQUARED_DIFFERENCE:
case BuiltinOperator_REVERSE_V2:
case BuiltinOperator_WHERE:
case BuiltinOperator_RANK:
case BuiltinOperator_NON_MAX_SUPPRESSION_V4:
case BuiltinOperator_NON_MAX_SUPPRESSION_V5:
case BuiltinOperator_SCATTER_ND:
case BuiltinOperator_DENSIFY:
case BuiltinOperator_SEGMENT_SUM:
case BuiltinOperator_BROADCAST_TO:
case BuiltinOperator_RFFT2D:
case BuiltinOperator_IMAG:
case BuiltinOperator_REAL:
case BuiltinOperator_COMPLEX_ABS:
case BuiltinOperator_HASHTABLE_FIND:
case BuiltinOperator_HASHTABLE_IMPORT:
case BuiltinOperator_HASHTABLE_SIZE:
return kTfLiteOk;
case BuiltinOperator_PLACEHOLDER_FOR_GREATER_OP_CODES:
return kTfLiteError;
}
return kTfLiteError;
} // NOLINT[readability/fn_size]
#endif // !defined(TF_LITE_STATIC_MEMORY)
} // namespace
// Maps a flatbuffer TensorType enum value onto the corresponding TfLiteType.
// On success the result is written through `type` and kTfLiteOk is returned;
// for an unrecognized value, `type` is set to kTfLiteNoType, the error is
// reported, and kTfLiteError is returned.
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
                               ErrorReporter* error_reporter) {
  switch (tensor_type) {
    case TensorType_FLOAT16:
      *type = kTfLiteFloat16;
      break;
    case TensorType_FLOAT32:
      *type = kTfLiteFloat32;
      break;
    case TensorType_FLOAT64:
      *type = kTfLiteFloat64;
      break;
    case TensorType_INT16:
      *type = kTfLiteInt16;
      break;
    case TensorType_INT32:
      *type = kTfLiteInt32;
      break;
    case TensorType_UINT32:
      *type = kTfLiteUInt32;
      break;
    case TensorType_UINT8:
      *type = kTfLiteUInt8;
      break;
    case TensorType_INT8:
      *type = kTfLiteInt8;
      break;
    case TensorType_INT64:
      *type = kTfLiteInt64;
      break;
    case TensorType_UINT64:
      *type = kTfLiteUInt64;
      break;
    case TensorType_STRING:
      *type = kTfLiteString;
      break;
    case TensorType_BOOL:
      *type = kTfLiteBool;
      break;
    case TensorType_COMPLEX64:
      *type = kTfLiteComplex64;
      break;
    case TensorType_COMPLEX128:
      *type = kTfLiteComplex128;
      break;
    case TensorType_RESOURCE:
      *type = kTfLiteResource;
      break;
    case TensorType_VARIANT:
      *type = kTfLiteVariant;
      break;
    default:
      *type = kTfLiteNoType;
      TF_LITE_REPORT_ERROR(error_reporter,
                           "Unsupported data type %d in tensor\n", tensor_type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseAbs(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // ABS carries no builtin options; there is nothing to allocate or parse.
  return kTfLiteOk;
}
// Parses AddOptions out of `op` into a freshly allocated TfLiteAddParams,
// handing ownership of the result to the caller through `builtin_data`.
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteAddParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_AddOptions()) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->pot_scale_int16 = options->pot_scale_int16();
  }
  // TODO(b/157480169): Absent options intentionally leave the params
  // default-initialized rather than failing, preserving legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// ADD_N has no builtin options; parameters are intentionally unnamed (matching
// the other no-op parsers) to avoid unused-parameter warnings.
TfLiteStatus ParseAddN(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}
// Parses ArgMaxOptions into a newly allocated TfLiteArgMaxParams; ownership
// of the params transfers to the caller through `builtin_data`.
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteArgMaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ArgMaxOptions()) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(options->output_type(),
                                            &params->output_type,
                                            error_reporter));
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Parses ArgMinOptions into a newly allocated TfLiteArgMinParams; ownership
// of the params transfers to the caller through `builtin_data`.
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteArgMinParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ArgMinOptions()) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(options->output_type(),
                                            &params->output_type,
                                            error_reporter));
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// Parses BatchMatMulOptions into a newly allocated TfLiteBatchMatMulParams,
// passing ownership to the caller via `builtin_data`.
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
                              BuiltinDataAllocator* allocator,
                              void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteBatchMatMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const BatchMatMulOptions* options =
      op->builtin_options_as_BatchMatMulOptions();
  if (options != nullptr) {
    params->adj_x = options->adj_x();
    params->adj_y = options->adj_y();
    params->asymmetric_quantize_inputs =
        options->asymmetric_quantize_inputs();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseBatchToSpaceNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  // BATCH_TO_SPACE_ND carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// Parses CastOptions (input/output dtypes) into a newly allocated
// TfLiteCastParams, passing ownership to the caller via `builtin_data`.
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCastParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const CastOptions* options = op->builtin_options_as_CastOptions();
  if (options != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(options->in_data_type(),
                                            &params->in_data_type,
                                            error_reporter));
    TF_LITE_ENSURE_STATUS(ConvertTensorType(options->out_data_type(),
                                            &params->out_data_type,
                                            error_reporter));
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCeil(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // CEIL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// Parses ConcatenationOptions into a newly allocated TfLiteConcatenationParams;
// ownership of the params transfers to the caller through `builtin_data`.
TfLiteStatus ParseConcatenation(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteConcatenationParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ConcatenationOptions()) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->axis = options->axis();
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Parses Conv2DOptions (padding, strides, activation, dilation) into a newly
// allocated TfLiteConvParams; ownership transfers to the caller through
// `builtin_data`.
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_Conv2DOptions()) {
    params->padding = ConvertPadding(options->padding());
    params->stride_width = options->stride_w();
    params->stride_height = options->stride_h();
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->dilation_width_factor = options->dilation_w_factor();
    params->dilation_height_factor = options->dilation_h_factor();
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// Parses CumsumOptions into a newly allocated TfLiteCumsumParams, passing
// ownership to the caller via `builtin_data`.
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteCumsumParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const CumsumOptions* options = op->builtin_options_as_CumsumOptions();
  if (options != nullptr) {
    params->exclusive = options->exclusive();
    params->reverse = options->reverse();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseCos(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // COS carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// Parses DepthToSpaceOptions into a newly allocated TfLiteDepthToSpaceParams;
// ownership of the params transfers to the caller through `builtin_data`.
TfLiteStatus ParseDepthToSpace(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDepthToSpaceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_DepthToSpaceOptions()) {
    params->block_size = options->block_size();
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Parses DepthwiseConv2DOptions (padding, strides, depth multiplier,
// activation, dilation) into a newly allocated TfLiteDepthwiseConvParams;
// ownership transfers to the caller through `builtin_data`.
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDepthwiseConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_DepthwiseConv2DOptions()) {
    params->padding = ConvertPadding(options->padding());
    params->stride_width = options->stride_w();
    params->stride_height = options->stride_h();
    params->depth_multiplier = options->depth_multiplier();
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->dilation_width_factor = options->dilation_w_factor();
    params->dilation_height_factor = options->dilation_h_factor();
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseDequantize(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // DEQUANTIZE carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// Parses DivOptions into a newly allocated TfLiteDivParams, passing ownership
// to the caller via `builtin_data`.
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteDivParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const DivOptions* options = op->builtin_options_as_DivOptions();
  if (options != nullptr) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseElu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // ELU carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseEqual(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // EQUAL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExp(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  // EXP carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseExpandDims(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  // EXPAND_DIMS carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFill(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  // FILL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloor(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  // FLOOR carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorDiv(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // FLOOR_DIV carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseFloorMod(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // FLOOR_MOD carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// Parses FullyConnectedOptions into a newly allocated
// TfLiteFullyConnectedParams, handing ownership to the caller via
// `builtin_data`. Returns kTfLiteError for an unrecognized weights format.
TfLiteStatus ParseFullyConnected(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteFullyConnectedParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteFullyConnectedParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const FullyConnectedOptions* schema_params =
      op->builtin_options_as_FullyConnectedOptions();
  if (schema_params != nullptr) {
    params->activation =
        ConvertActivation(schema_params->fused_activation_function());
    params->keep_num_dims = schema_params->keep_num_dims();
    params->asymmetric_quantize_inputs =
        schema_params->asymmetric_quantize_inputs();
    // Only the two formats known to this runtime are accepted; anything else
    // is reported and rejected (params is freed by its deleter on return).
    switch (schema_params->weights_format()) {
      case FullyConnectedOptionsWeightsFormat_DEFAULT:
        params->weights_format = kTfLiteFullyConnectedWeightsFormatDefault;
        break;
      case FullyConnectedOptionsWeightsFormat_SHUFFLED4x16INT8:
        params->weights_format =
            kTfLiteFullyConnectedWeightsFormatShuffled4x16Int8;
        break;
      default:
        TF_LITE_REPORT_ERROR(error_reporter,
                             "Unhandled fully-connected weights format.");
        return kTfLiteError;
    }
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better undertand the ramifications of changing the legacy behavior.
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// Parses GatherOptions into a newly allocated TfLiteGatherParams. `axis` and
// `batch_dims` default to 0 when the model carries no options; ownership of
// the params transfers to the caller via `builtin_data`.
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteGatherParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  // Defaults used when the flatbuffer has no GatherOptions table.
  params->axis = 0;
  params->batch_dims = 0;
  const GatherOptions* options = op->builtin_options_as_GatherOptions();
  if (options != nullptr) {
    params->axis = options->axis();
    params->batch_dims = options->batch_dims();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGatherNd(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  // GATHER_ND carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreater(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  // GREATER carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseGreaterEqual(const Operator*, ErrorReporter*,
                               BuiltinDataAllocator*, void**) {
  // GREATER_EQUAL carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
TfLiteStatus ParseHardSwish(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  // HARD_SWISH carries no builtin options; nothing to parse.
  return kTfLiteOk;
}
// Parses L2NormOptions into a newly allocated TfLiteL2NormParams; ownership
// of the params transfers to the caller through `builtin_data`.
TfLiteStatus ParseL2Normalization(const Operator* op,
                                  ErrorReporter* error_reporter,
                                  BuiltinDataAllocator* allocator,
                                  void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteL2NormParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_L2NormOptions()) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
  }
  // TODO(b/157480169): Absent options leave the params default-initialized
  // to preserve legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a LeakyReluOptions table into a freshly allocated
// TfLiteLeakyReluParams, handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
                            BuiltinDataAllocator* allocator,
                            void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteLeakyReluParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteLeakyReluParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const LeakyReluOptions* schema_params =
      op->builtin_options_as_LeakyReluOptions();
  if (schema_params != nullptr) {
    params->alpha = schema_params->alpha();
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LESS has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLess(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LESS_EQUAL has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLessEqual(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOG has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLog(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOGICAL_AND has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLogicalAnd(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOGICAL_NOT has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLogicalNot(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOGICAL_OR has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLogicalOr(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOGISTIC has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLogistic(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// LOG_SOFTMAX has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseLogSoftmax(const Operator*, ErrorReporter*,
                             BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// MAXIMUM has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseMaximum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// MINIMUM has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseMinimum(const Operator*, ErrorReporter*,
                          BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Converts a MulOptions table into a freshly allocated TfLiteMulParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteMulParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_MulOptions()) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// NEG has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseNeg(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// NOT_EQUAL has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseNotEqual(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Converts a PackOptions table into a freshly allocated TfLitePackParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLitePackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_PackOptions()) {
    params->values_count = options->values_count();
    params->axis = options->axis();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// PAD has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParsePad(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// PADV2 has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParsePadV2(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}
// Converts a Pool2DOptions table into a freshly allocated TfLitePoolParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLitePoolParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_Pool2DOptions()) {
    params->padding = ConvertPadding(options->padding());
    params->stride_width = options->stride_w();
    params->stride_height = options->stride_h();
    params->filter_width = options->filter_width();
    params->filter_height = options->filter_height();
    params->activation =
        ConvertActivation(options->fused_activation_function());
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// POW has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParsePow(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// PRELU has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParsePrelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// QUANTIZE has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseQuantize(const Operator*, ErrorReporter*,
                           BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Converts a ReducerOptions table into a freshly allocated TfLiteReducerParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteReducerParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ReducerOptions()) {
    params->keep_dims = options->keep_dims();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// RELU has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseRelu(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// RELU6 has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseRelu6(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}
// Converts a ReshapeOptions table into a freshly allocated TfLiteReshapeParams.
// The flatbuffer `new_shape` vector is copied into the fixed-size
// params->shape array; ownership passes to the caller via `builtin_data`.
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteReshapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ReshapeOptions()) {
    const flatbuffers::Vector<int32_t>* new_shape = options->new_shape();
    if (new_shape != nullptr) {
      TF_LITE_ENSURE_STATUS(
          FlatBufferIntVectorToArray(sizeof(params->shape), new_shape,
                                     params->shape, error_reporter, "reshape"));
      params->num_dimensions = new_shape->size();
    }
    // TODO(b/157480169) TODO(b/147203660): When `new_shape` is missing the
    // params struct keeps legacy (unset) contents; we should either return
    // kTfLiteError or fill in reasonable defaults once the ramifications of
    // changing the legacy behavior are understood.
  }
  // TODO(b/157480169): Same legacy-behavior caveat when the whole options
  // table is absent.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a ResizeBilinearOptions table into a freshly allocated
// TfLiteResizeBilinearParams. Unlike most parsers in this file, a missing
// options table gets explicit defaults (both flags false).
TfLiteStatus ParseResizeBilinear(const Operator* op,
                                 ErrorReporter* error_reporter,
                                 BuiltinDataAllocator* allocator,
                                 void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteResizeBilinearParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_ResizeBilinearOptions()) {
    params->align_corners = options->align_corners();
    params->half_pixel_centers = options->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a ResizeNearestNeighborOptions table into a freshly allocated
// TfLiteResizeNearestNeighborParams. A missing options table gets explicit
// defaults (both flags false).
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
                                        ErrorReporter* error_reporter,
                                        BuiltinDataAllocator* allocator,
                                        void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteResizeNearestNeighborParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options =
          op->builtin_options_as_ResizeNearestNeighborOptions()) {
    params->align_corners = options->align_corners();
    params->half_pixel_centers = options->half_pixel_centers();
  } else {
    params->align_corners = false;
    params->half_pixel_centers = false;
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// ROUND has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseRound(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// RSQRT has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseRsqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                        void**) {
  return kTfLiteOk;
}
// Converts a ShapeOptions table into a freshly allocated TfLiteShapeParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  // Fix: every sibling Parse* function validates its pointer arguments; this
  // one previously skipped the check, so null arguments would be dereferenced
  // below instead of being caught up front.
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  std::unique_ptr<TfLiteShapeParams,
                  SafeBuiltinDataAllocator::BuiltinDataDeleter>
      params = safe_allocator.Allocate<TfLiteShapeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  const ShapeOptions* schema_params = op->builtin_options_as_ShapeOptions();
  if (schema_params != nullptr) {
    TF_LITE_ENSURE_STATUS(ConvertTensorType(schema_params->out_type(),
                                            &params->out_type, error_reporter));
  } else {
    // TODO(b/157480169): We should either return kTfLiteError or fill in some
    // reasonable defaults in the params struct. We are not doing so until we
    // better understand the ramifications of changing the legacy behavior.
  }
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// SIN has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseSin(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                      void**) {
  return kTfLiteOk;
}
// Converts a SoftmaxOptions table into a freshly allocated TfLiteSoftmaxParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSoftmaxParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SoftmaxOptions()) {
    params->beta = options->beta();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// SPACE_TO_BATCH_ND has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseSpaceToBatchNd(const Operator*, ErrorReporter*,
                                 BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Converts a SpaceToDepthOptions table into a freshly allocated
// TfLiteSpaceToDepthParams, handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSpaceToDepth(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSpaceToDepthParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SpaceToDepthOptions()) {
    params->block_size = options->block_size();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a SplitOptions table into a freshly allocated TfLiteSplitParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
                        BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSplitParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SplitOptions()) {
    params->num_splits = options->num_splits();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a SplitVOptions table into a freshly allocated TfLiteSplitVParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSplitVParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SplitVOptions()) {
    params->num_splits = options->num_splits();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a SqueezeOptions table into a freshly allocated TfLiteSqueezeParams.
// The flatbuffer `squeeze_dims` vector is copied into the fixed-size
// params->squeeze_dims array; ownership passes via `builtin_data`.
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
                          BuiltinDataAllocator* allocator,
                          void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSqueezeParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SqueezeOptions()) {
    const auto* squeeze_dims = options->squeeze_dims();
    if (squeeze_dims != nullptr) {
      TF_LITE_ENSURE_STATUS(FlatBufferIntVectorToArray(
          sizeof(params->squeeze_dims), squeeze_dims, params->squeeze_dims,
          error_reporter, "squeeze"));
      params->num_squeeze_dims = squeeze_dims->size();
    } else {
      // No dims given: squeeze nothing explicitly.
      params->num_squeeze_dims = 0;
    }
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// SQRT has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseSqrt(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// SQUARE has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseSquare(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                         void**) {
  return kTfLiteOk;
}
// Converts a StridedSliceOptions table into a freshly allocated
// TfLiteStridedSliceParams, handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseStridedSlice(const Operator* op,
                               ErrorReporter* error_reporter,
                               BuiltinDataAllocator* allocator,
                               void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteStridedSliceParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_StridedSliceOptions()) {
    params->begin_mask = options->begin_mask();
    params->end_mask = options->end_mask();
    params->ellipsis_mask = options->ellipsis_mask();
    params->new_axis_mask = options->new_axis_mask();
    params->shrink_axis_mask = options->shrink_axis_mask();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts a SubOptions table into a freshly allocated TfLiteSubParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
                      BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSubParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SubOptions()) {
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->pot_scale_int16 = options->pot_scale_int16();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts an SVDFOptions table into a freshly allocated TfLiteSVDFParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
                       BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteSVDFParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_SVDFOptions()) {
    params->rank = options->rank();
    params->activation =
        ConvertActivation(options->fused_activation_function());
    params->asymmetric_quantize_inputs =
        options->asymmetric_quantize_inputs();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// TANH has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseTanh(const Operator*, ErrorReporter*, BuiltinDataAllocator*,
                       void**) {
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// TRANSPOSE has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseTranspose(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Converts a TransposeConvOptions table into a freshly allocated
// TfLiteTransposeConvParams, handing ownership to the caller via
// `builtin_data`.
TfLiteStatus ParseTransposeConv(const Operator* op,
                                ErrorReporter* error_reporter,
                                BuiltinDataAllocator* allocator,
                                void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteTransposeConvParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_TransposeConvOptions()) {
    params->padding = ConvertPadding(options->padding());
    params->stride_width = options->stride_w();
    params->stride_height = options->stride_h();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// Converts an UnpackOptions table into a freshly allocated TfLiteUnpackParams,
// handing ownership to the caller via `builtin_data`.
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
  CheckParsePointerParams(op, error_reporter, allocator, builtin_data);
  SafeBuiltinDataAllocator safe_allocator(allocator);
  auto params = safe_allocator.Allocate<TfLiteUnpackParams>();
  TF_LITE_ENSURE(error_reporter, params != nullptr);
  if (const auto* options = op->builtin_options_as_UnpackOptions()) {
    params->num = options->num();
    params->axis = options->axis();
  }
  // TODO(b/157480169): When the options table is absent the params struct is
  // left with legacy (unset) contents. We should either return kTfLiteError
  // or fill in reasonable defaults, but are not doing so until we better
  // understand the ramifications of changing the legacy behavior.
  *builtin_data = params.release();
  return kTfLiteOk;
}
// We have this parse function instead of directly returning kTfLiteOk from the
// switch-case in ParseOpData because this function is used as part of the
// selective registration for the OpResolver implementation in micro.
// ZEROS_LIKE has no builtin options to parse; all arguments are ignored.
TfLiteStatus ParseZerosLike(const Operator*, ErrorReporter*,
                            BuiltinDataAllocator*, void**) {
  return kTfLiteOk;
}
// Top-level dispatcher: on full TF Lite this forwards to ParseOpDataTfLite;
// on TF Lite Micro (TF_LITE_STATIC_MEMORY) it is intentionally unsupported
// and reports an error, because micro relies on the per-operator Parse*
// functions above for selective registration.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
                         ErrorReporter* error_reporter,
                         BuiltinDataAllocator* allocator, void** builtin_data) {
// TODO(b/145762662): It would be preferable to have the build graph for TF Lite
// Micro not have the ParseOpData function at all. This would require splitting
// the current file into two separate files, one of which defines the
// ParseOpData function and the other that defines the operator specific parse
// functions (e.g. ParseAdd).
//
// Such a split was attempted but was not worth the effort at the time because
// of the following reasons:
//  * We could either duplicate the functions and the SafeBuiltinDataAllocator
//    class in the anonymous namespace of this file, or attempt to make a common
//    library with these helper functions and class.
//  * Making a common library with a separate build target was not feasible as
//    it introduced circular dependencies due to the ErrorReporter and a common
//    .cc and .h within the same api build target the also cause circular
//    dependencies due to the BuiltinDataAllocator class.
//  * If all the builtin operators were to have their own parse functions, or we
//    were ok with some amount of code duplication, then this split of the .cc
//    files would be a lot more feasible.
#ifdef TF_LITE_STATIC_MEMORY
  TF_LITE_REPORT_ERROR(
      error_reporter,
      "ParseOpData is unsupported on TfLiteMicro, please use the operator "
      "specific parse functions (e.g. ParseAdd etc.).\n");
  return kTfLiteError;
#else
  return ParseOpDataTfLite(op, op_type, error_reporter, allocator,
                           builtin_data);
#endif
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/flatbuffer_conversions.cc | C++ | apache-2.0 | 86,423 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
#define TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
// These functions transform codes and data structures that are defined in the
// flatbuffer serialization format into in-memory values that are used by the
// runtime API and interpreter.
#include <cstddef>
#include <new>
#include <type_traits>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
// Interface class for builtin data allocations. Implementations decide where
// the memory for parsed builtin-option structs lives (heap, arena, etc.).
class BuiltinDataAllocator {
 public:
  // Returns a buffer of at least `size` bytes. `alignment_hint` is the
  // caller's preferred alignment for the allocation.
  virtual void* Allocate(size_t size, size_t alignment_hint) = 0;
  // Releases a buffer previously returned by Allocate().
  virtual void Deallocate(void* data) = 0;
  // Allocate a structure, but make sure it is a POD structure that doesn't
  // require constructors to run. The reason we do this, is that Interpreter's C
  // extension part will take ownership so destructors will not be run during
  // deallocation.
  // NOTE(review): placement-new below assumes Allocate() returned non-null;
  // a null return would be undefined behavior — confirm implementations
  // guarantee success or that callers check the result.
  template <typename T>
  T* AllocatePOD() {
    // TODO(b/154346074): Change this to is_trivially_destructible when all
    // platform targets support that properly.
    static_assert(std::is_pod<T>::value, "Builtin data structure must be POD.");
    void* allocated_memory = this->Allocate(sizeof(T), alignof(T));
    return new (allocated_memory) T();
  }
  virtual ~BuiltinDataAllocator() {}
};
// Parse the appropriate data out of the op.
//
// This handles builtin data explicitly as there are flatbuffer schemas.
// If it returns kTfLiteOk, it passes the data out with `builtin_data`. The
// calling function has to pass in an allocator object, and this allocator
// will be called to reserve space for the output data. If the calling
// function's allocator reserves memory on the heap, then it's the calling
// function's responsibility to free it.
// If it returns kTfLiteError, `builtin_data` will be `nullptr`.
TfLiteStatus ParseOpData(const Operator* op, BuiltinOperator op_type,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
// Converts the tensor data type used in the flat buffer to the representation
// used by the runtime.
TfLiteStatus ConvertTensorType(TensorType tensor_type, TfLiteType* type,
ErrorReporter* error_reporter);
TfLiteStatus ParseAbs(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAdd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseAddN(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseArgMin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseBatchMatMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseBatchToSpaceNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseCeil(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCast(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseConcatenation(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseConv2D(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCos(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseCumsum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseDepthToSpace(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDepthwiseConv2D(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDequantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseElu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExp(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseExpandDims(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFill(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloor(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseFloorDiv(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFloorMod(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseFullyConnected(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGather(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseGatherNd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseGreater(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseGreaterEqual(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseHardSwish(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseL2Normalization(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLeakyRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLess(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseLessEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLog(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseLogicalAnd(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogicalNot(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogicalOr(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogistic(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseLogSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseMaximum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseMinimum(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseMul(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseNeg(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseNotEqual(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParsePack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePad(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePadV2(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePool(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePow(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParsePrelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseQuantize(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseReducer(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRelu(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRelu6(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseReshape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseResizeBilinear(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseResizeNearestNeighbor(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseRound(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseRsqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseShape(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSin(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSoftmax(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSpaceToBatchNd(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSpaceToDepth(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSplit(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSplitV(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqueeze(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSqrt(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSquare(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseStridedSlice(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseSub(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseSvdf(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseTanh(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseTranspose(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseTransposeConv(const Operator* op,
ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
TfLiteStatus ParseUnpack(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator, void** builtin_data);
TfLiteStatus ParseZerosLike(const Operator* op, ErrorReporter* error_reporter,
BuiltinDataAllocator* allocator,
void** builtin_data);
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_FLATBUFFER_CONVERSIONS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/flatbuffer_conversions.h | C++ | apache-2.0 | 17,066 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/core/api/op_resolver.h"
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/schema/schema_utils.h"
namespace tflite {
// Maps an OperatorCode from the flatbuffer model to a TfLiteRegistration via
// `op_resolver`. On success, `*registration` points at the resolved
// registration and kTfLiteOk is returned; on failure, `*registration` is
// nullptr and kTfLiteError is returned.
TfLiteStatus GetRegistrationFromOpCode(
    const OperatorCode* opcode, const OpResolver& op_resolver,
    ErrorReporter* error_reporter, const TfLiteRegistration** registration) {
  *registration = nullptr;
  const auto builtin_code = GetBuiltinCode(opcode);
  const int version = opcode->version();

  // Builtin codes outside the compiled-in schema range come from a model
  // newer than this binary.
  if (builtin_code < BuiltinOperator_MIN ||
      builtin_code > BuiltinOperator_MAX) {
    TF_LITE_REPORT_ERROR(
        error_reporter,
        "Op builtin_code out of range: %d. Are you using old TFLite binary "
        "with newer model?",
        builtin_code);
    return kTfLiteError;
  }

  if (builtin_code != BuiltinOperator_CUSTOM) {
    *registration = op_resolver.FindOp(builtin_code, version);
    if (*registration != nullptr) return kTfLiteOk;
    TF_LITE_REPORT_ERROR(
        error_reporter,
        "Didn't find op for builtin opcode '%s' version '%d'. "
        "An older version of this builtin might be supported. "
        "Are you using an old TFLite binary with a newer model?\n",
        EnumNameBuiltinOperator(builtin_code), version);
    return kTfLiteError;
  }

  if (!opcode->custom_code()) {
    TF_LITE_REPORT_ERROR(
        error_reporter,
        "Operator with CUSTOM builtin_code has no custom_code.\n");
    return kTfLiteError;
  }

  // Custom op: resolve by name. An unresolved custom op is intentionally not
  // reported here; the final check happens while preparing ops.
  const char* name = opcode->custom_code()->c_str();
  *registration = op_resolver.FindOp(name, version);
  return (*registration != nullptr) ? kTfLiteOk : kTfLiteError;
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/op_resolver.cc | C++ | apache-2.0 | 2,594 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
#include <memory>
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/schema/schema_generated.h"
namespace tflite {
/// Abstract interface that maps op codes or custom op names referenced in a
/// flatbuffer model to executable function pointers (TfLiteRegistrations).
class OpResolver {
 public:
  /// Looks up the registration for a builtin operator by enum code; returns
  /// nullptr if the (op, version) pair is not supported.
  virtual const TfLiteRegistration* FindOp(tflite::BuiltinOperator op,
                                           int version) const = 0;
  /// Looks up the registration for a custom operator by name; returns
  /// nullptr if the (name, version) pair is not supported.
  virtual const TfLiteRegistration* FindOp(const char* op,
                                           int version) const = 0;
  // Optional delegates for resolving and handling ops in the flatbuffer
  // model, applied in addition to the standard TfLiteRegistration lookup
  // during graph resolution.
  using TfLiteDelegatePtrVector =
      std::vector<std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>>;
  virtual TfLiteDelegatePtrVector GetDelegates(int num_threads) const {
    return {};
  }
  virtual ~OpResolver() = default;

 private:
  /// Returns true if this OpResolver may contain any "user defined" ops,
  /// i.e. op definitions other than those bundled in
  /// tflite::ops::builtin::BuiltinOpResolver.
  ///
  /// A true result does not prove a user-defined op is present -- only that
  /// the absence of user-defined ops cannot be guaranteed.
  ///
  /// Note that "user-defined" is not a synonym for "custom":
  /// BuiltinOpResolver may support certain "custom" ops in addition to
  /// "builtin" ops, and may not support every "builtin" op enum value.
  virtual bool MayContainUserDefinedOps() const { return true; }
  friend class OpResolverInternal;
};
// Handles the logic for converting between an OperatorCode structure extracted
// from a flatbuffer and information about a registered operator
// implementation.
TfLiteStatus GetRegistrationFromOpCode(const OperatorCode* opcode,
const OpResolver& op_resolver,
ErrorReporter* error_reporter,
const TfLiteRegistration** registration);
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/op_resolver.h | C++ | apache-2.0 | 3,347 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
#define TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
/// \file
/// This header op_resolver_internal.h exists so that we can have fine-grained
/// access control on the MayContainUserDefinedOps method.
#include "tensorflow/lite/core/api/op_resolver.h"
namespace tflite {
class OpResolverInternal {
 public:
  // Exposes the private OpResolver::MayContainUserDefinedOps() to befriended
  // internals without widening the public OpResolver API.
  static bool MayContainUserDefinedOps(const OpResolver& op_resolver) {
    return op_resolver.MayContainUserDefinedOps();
  }
};
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_OP_RESOLVER_INTERNAL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/op_resolver_internal.h | C++ | apache-2.0 | 1,255 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_PROFILER_H_
#define TENSORFLOW_LITE_CORE_API_PROFILER_H_
#include <cstdint>
namespace tflite {
// A simple utility for enabling profiled event tracing in TensorFlow Lite.
class Profiler {
 public:
  // As certain Profiler instance might be only interested in certain event
  // types, we define each event type value to allow a Profiler to use
  // bitmasking bitwise operations to determine whether an event should be
  // recorded or not.
  enum class EventType {
    // Default event type, the metadata field has no special significance.
    DEFAULT = 1,
    // The event is an operator invocation and the event_metadata field is the
    // index of operator node.
    OPERATOR_INVOKE_EVENT = 2,
    // The event is an invocation for an internal operator of a TFLite delegate.
    // The event_metadata field is the index of operator node that's specific to
    // the delegate.
    DELEGATE_OPERATOR_INVOKE_EVENT = 4,
    // The event is a recording of runtime instrumentation such as the overall
    // TFLite runtime status, the TFLite delegate status (if a delegate
    // is applied), and the overall model inference latency etc.
    // Note, the delegate status and overall status are stored as separate
    // event_metadata fields. In particular, the delegate status is encoded
    // as DelegateStatus::full_status().
    GENERAL_RUNTIME_INSTRUMENTATION_EVENT = 8,
  };
  virtual ~Profiler() {}
  // Signals the beginning of an event and returns a handle to the profile
  // event. The `event_metadata1` and `event_metadata2` have different
  // interpretations based on the actual Profiler instance and the `event_type`.
  // For example, as for the 'SubgraphAwareProfiler' defined in
  // lite/core/subgraph.h, when the event_type is OPERATOR_INVOKE_EVENT,
  // `event_metadata1` represents the index of a TFLite node, and
  // `event_metadata2` represents the index of the subgraph that this event
  // comes from.
  virtual uint32_t BeginEvent(const char* tag, EventType event_type,
                              int64_t event_metadata1,
                              int64_t event_metadata2) = 0;
  // Similar w/ the above, but `event_metadata2` defaults to 0.
  uint32_t BeginEvent(const char* tag, EventType event_type,
                      int64_t event_metadata) {
    return BeginEvent(tag, event_type, event_metadata, /*event_metadata2*/ 0);
  }
  // Signals an end to the specified profile event with 'event_metadata's. This
  // is useful when 'event_metadata's are not available when the event begins
  // or when one wants to overwrite the 'event_metadata's set at the beginning.
  virtual void EndEvent(uint32_t event_handle, int64_t event_metadata1,
                        int64_t event_metadata2) {}
  // Signals an end to the specified profile event.
  virtual void EndEvent(uint32_t event_handle) = 0;
  // Appends an event of type 'event_type' with 'tag' and 'event_metadata'
  // which started at 'start' and ended at 'end'.
  // Note:
  // In cases where ProfileSummarizer and tensorflow::StatsCalculator are used
  // they assume the value is in "usec"; if subclasses report another unit,
  // the aggregated values will not be meaningful.
  // TODO(karimnosseir): Revisit and make the function more clear.
  void AddEvent(const char* tag, EventType event_type, uint64_t start,
                uint64_t end, int64_t event_metadata) {
    AddEvent(tag, event_type, start, end, event_metadata,
             /*event_metadata2*/ 0);
  }
  virtual void AddEvent(const char* tag, EventType event_type, uint64_t start,
                        uint64_t end, int64_t event_metadata1,
                        int64_t event_metadata2) {}
 protected:
  friend class ScopedProfile;
};
// RAII helper that emits a profile event spanning the object's lifetime:
// the event begins at construction and ends when the object goes out of
// scope. `tag` must live at least as long as `profiler`. A null `profiler`
// turns the whole scope into a no-op.
class ScopedProfile {
 public:
  ScopedProfile(Profiler* profiler, const char* tag,
                Profiler::EventType event_type = Profiler::EventType::DEFAULT,
                int64_t event_metadata = 0)
      : profiler_(profiler), event_handle_(0) {
    if (profiler_ != nullptr) {
      event_handle_ = profiler_->BeginEvent(tag, event_type, event_metadata);
    }
  }
  ~ScopedProfile() {
    if (profiler_ != nullptr) {
      profiler_->EndEvent(event_handle_);
    }
  }

 protected:
  Profiler* profiler_;
  uint32_t event_handle_;
};
// Scoped profile for an OPERATOR_INVOKE_EVENT on the TFLite node at
// `node_index`.
// NOTE(review): node_index is narrowed to uint32_t here before being widened
// to the int64_t metadata parameter, so a negative index would arrive as a
// large positive value -- confirm this is intended.
class ScopedOperatorProfile : public ScopedProfile {
 public:
  ScopedOperatorProfile(Profiler* profiler, const char* tag, int node_index)
      : ScopedProfile(profiler, tag, Profiler::EventType::OPERATOR_INVOKE_EVENT,
                      static_cast<uint32_t>(node_index)) {}
};
// Scoped profile for a DELEGATE_OPERATOR_INVOKE_EVENT on the delegate's
// internal node at `node_index` (the index is delegate-specific).
// NOTE(review): node_index is narrowed to uint32_t before widening to the
// int64_t metadata parameter -- confirm negative indices are impossible here.
class ScopedDelegateOperatorProfile : public ScopedProfile {
 public:
  ScopedDelegateOperatorProfile(Profiler* profiler, const char* tag,
                                int node_index)
      : ScopedProfile(profiler, tag,
                      Profiler::EventType::DELEGATE_OPERATOR_INVOKE_EVENT,
                      static_cast<uint32_t>(node_index)) {}
};
// Scoped profile for a GENERAL_RUNTIME_INSTRUMENTATION_EVENT. The delegate
// and interpreter status values may be filled in later via
// set_runtime_status(); they are reported when the scope ends.
class ScopedRuntimeInstrumentationProfile : public ScopedProfile {
 public:
  ScopedRuntimeInstrumentationProfile(Profiler* profiler, const char* tag)
      : ScopedProfile(
            profiler, tag,
            Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, -1) {}
  void set_runtime_status(int64_t delegate_status, int64_t interpreter_status) {
    if (profiler_) {
      delegate_status_ = delegate_status;
      interpreter_status_ = interpreter_status;
    }
  }
  ~ScopedRuntimeInstrumentationProfile() {
    if (profiler_) {
      profiler_->EndEvent(event_handle_, delegate_status_, interpreter_status_);
    }
  }

 private:
  // Default to 0 so the destructor never passes indeterminate values to
  // EndEvent() when set_runtime_status() was not called (reading an
  // uninitialized member is undefined behavior).
  int64_t delegate_status_ = 0;
  int64_t interpreter_status_ = 0;
};
} // namespace tflite
// Token-pasting helpers that mint a unique local variable name per macro
// expansion (combined with __COUNTER__ below).
#define TFLITE_VARNAME_UNIQ_IMPL(name, ctr) name##ctr
#define TFLITE_VARNAME_UNIQ(name, ctr) TFLITE_VARNAME_UNIQ_IMPL(name, ctr)
// Profiles the enclosing scope as a DEFAULT event.
#define TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler, tag) \
  tflite::ScopedProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
      (profiler), (tag))
// Profiles the enclosing scope as an operator invocation on `node_index`.
#define TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler, tag, node_index) \
  tflite::ScopedOperatorProfile TFLITE_VARNAME_UNIQ(_profile_, __COUNTER__)( \
      (profiler), (tag), (node_index))
// Profiles the enclosing scope as a delegate-internal operator invocation.
#define TFLITE_SCOPED_DELEGATE_OPERATOR_PROFILE(profiler, tag, node_index) \
  tflite::ScopedDelegateOperatorProfile TFLITE_VARNAME_UNIQ( \
      _profile_, __COUNTER__)((profiler), (tag), (node_index))
// Records an instantaneous GENERAL_RUNTIME_INSTRUMENTATION_EVENT (begin
// immediately followed by end).
// NOTE(review): the trailing semicolon after `while (false)` defeats the
// do/while(0) idiom -- an extra `;` is emitted at the call site, which can
// break usage inside an unbraced if/else. Confirm call sites before removing.
#define TFLITE_ADD_RUNTIME_INSTRUMENTATION_EVENT(                          \
    profiler, tag, event_metadata1, event_metadata2)                       \
  do {                                                                     \
    if (profiler) {                                                        \
      const auto handle = profiler->BeginEvent(                            \
          tag, Profiler::EventType::GENERAL_RUNTIME_INSTRUMENTATION_EVENT, \
          event_metadata1, event_metadata2);                               \
      profiler->EndEvent(handle);                                          \
    }                                                                      \
  } while (false);
#endif // TENSORFLOW_LITE_CORE_API_PROFILER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/profiler.h | C++ | apache-2.0 | 8,068 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/core/api/tensor_utils.h"
#include <string.h>
#include "tensorflow/lite/c/common.h"
namespace tflite {
// Resets a variable tensor to its default value so stateful graphs start
// from a known state. Non-variable tensors are left untouched.
// For kTfLiteInt8 the fill byte is the quantization zero point; every other
// type is filled with 0.
// NOTE(review): other quantized types (e.g. uint8/int16) are zero-filled
// rather than zero-point-filled here -- confirm that is intended.
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor) {
  if (!tensor->is_variable) {
    return kTfLiteOk;
  }
  // TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
  // to the value of the buffer.
  int value = 0;
  if (tensor->type == kTfLiteInt8) {
    value = tensor->params.zero_point;
  }
  // TODO(b/139446230): Provide a platform header to better handle these
  // specific scenarios.
#if __ANDROID__ || defined(__x86_64__) || defined(__i386__) || \
    defined(__i386) || defined(__x86__) || defined(__X86__) || \
    defined(_X86_) || defined(_M_IX86) || defined(_M_X64)
  memset(tensor->data.raw, value, tensor->bytes);
#else
  // Manual byte-wise fill on platforms where memset is avoided (see the TODO
  // above for the platform-header cleanup that would unify these branches).
  char* raw_ptr = tensor->data.raw;
  for (size_t i = 0; i < tensor->bytes; ++i) {
    *raw_ptr = value;
    raw_ptr++;
  }
#endif
  return kTfLiteOk;
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/tensor_utils.cc | C++ | apache-2.0 | 1,644 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
#define TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
#include "tensorflow/lite/c/common.h"
namespace tflite {
// Resets a variable tensor to the default value.
TfLiteStatus ResetVariableTensor(TfLiteTensor* tensor);
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_TENSOR_UTILS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/tensor_utils.h | C++ | apache-2.0 | 1,011 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
/// Abstract interface for verifying a model.
#ifndef TENSORFLOW_LITE_CORE_API_VERIFIER_H_
#define TENSORFLOW_LITE_CORE_API_VERIFIER_H_
#include "tensorflow/lite/core/api/error_reporter.h"
namespace tflite {
/// Abstract interface that verifies whether a given model is legit.
/// It facilitates the use-case to verify and build a model without loading it
/// twice.
/// (See also "tensorflow/lite/tools/verifier.h".)
class TfLiteVerifier {
 public:
  /// Returns true if the model is legit.
  /// `data` points to the serialized model buffer of `length` bytes;
  /// verification failures should be reported through `reporter`.
  virtual bool Verify(const char* data, int length,
                      ErrorReporter* reporter) = 0;
  virtual ~TfLiteVerifier() {}
};
} // namespace tflite
#endif // TENSORFLOW_LITE_CORE_API_VERIFIER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/core/api/verifier.h | C++ | apache-2.0 | 1,385 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_
#define TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_
#include <memory>
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/op_resolver.h"
namespace tflite {
std::unique_ptr<MutableOpResolver> CreateOpResolver();
}
#endif // TENSORFLOW_LITE_CREATE_OP_RESOLVER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/create_op_resolver.h | C++ | apache-2.0 | 999 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Compatibility shim for moved header location.
#ifndef TENSORFLOW_LITE_ERROR_REPORTER_H_
#define TENSORFLOW_LITE_ERROR_REPORTER_H_
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/stderr_reporter.h"
#endif // TENSORFLOW_LITE_ERROR_REPORTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/error_reporter.h | C | apache-2.0 | 947 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
load("@flatbuffers//:build_defs.bzl", "flatbuffer_cc_library", "flatbuffer_java_library")
load("//tensorflow/core/platform:build_config_root.bzl", "tf_gpu_tests_tags")
load("//tensorflow/lite:special_rules.bzl", "tflite_extra_gles_deps", "tflite_portable_test_suite")
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"],
)
# C++ binding of the device-compatibility database FlatBuffers schema.
flatbuffer_cc_library(
    name = "database_fbs",
    srcs = ["database.fbs"],
)
exports_files(srcs = ["database.fbs"])
# Java binding of the same schema, generated under the org.tensorflow package.
flatbuffer_java_library(
    name = "database_fbs_java",
    srcs = ["database.fbs"],
    package_prefix = "org.tensorflow",
)
cc_library(
name = "canonicalize_value",
srcs = ["canonicalize_value.cc"],
hdrs = ["canonicalize_value.h"],
deps = [
"@com_google_absl//absl/algorithm:container",
"@com_google_absl//absl/strings",
],
)
cc_test(
name = "canonicalize_value_test",
srcs = ["canonicalize_value_test.cc"],
deps = [
":canonicalize_value",
"@com_google_googletest//:gtest_main",
],
)
cc_library(
name = "devicedb",
srcs = [
"devicedb.cc",
],
hdrs = [
"devicedb.h",
"variables.h",
],
deps = [
":database_fbs",
],
)
cc_binary(
name = "json_to_fb",
srcs = ["json_to_fb.cc"],
deps = [
"//tensorflow/lite/tools:command_line_flags",
"@flatbuffers",
],
)
genrule(
name = "devicedb-sample_bin",
srcs = [
"database.fbs",
"devicedb-sample.json",
],
outs = ["devicedb-sample.bin"],
cmd = """
$(location :json_to_fb) \
--fbs=$(location :database.fbs) \
--json_input=$(location :devicedb-sample.json) \
--fb_output=$(@)
""",
tools = [":json_to_fb"],
)
py_binary(
name = "convert_binary_to_cc_source",
srcs = ["convert_binary_to_cc_source.py"],
python_version = "PY3",
srcs_version = "PY3",
visibility = ["//visibility:public"],
)
genrule(
name = "devicedb-sample_cc",
srcs = ["devicedb-sample.bin"],
outs = [
"devicedb-sample.cc",
"devicedb-sample.h",
],
# convert_file_to_c_source for some reason doesn't define the global with
# 'extern', which is needed for global const variables in C++.
cmd = """
$(location :convert_binary_to_cc_source) \
--input_binary_file $(location :devicedb-sample.bin) \
--output_header_file $(location :devicedb-sample.h) \
--output_source_file $(location :devicedb-sample.cc) \
--array_variable_name g_tflite_acceleration_devicedb_sample_binary
""",
tools = [":convert_binary_to_cc_source"],
)
cc_library(
name = "devicedb_sample",
srcs = ["devicedb-sample.cc"],
hdrs = ["devicedb-sample.h"],
deps = [":database_fbs"],
)
cc_test(
name = "devicedb_test",
srcs = [
"devicedb_test.cc",
],
deps = [
":database_fbs",
":devicedb",
":devicedb_sample",
"//tensorflow/lite/testing:util",
"@com_google_googletest//:gtest_main",
"@flatbuffers",
],
)
exports_files(["gpu_compatibility.bin"])
genrule(
name = "gpu_compatibility_binary",
srcs = ["gpu_compatibility.bin"],
outs = [
"gpu_compatibility_binary.h",
"gpu_compatibility_binary.cc",
],
# convert_file_to_c_source for some reason doesn't define the global with
# 'extern', which is needed for global const variables in C++.
cmd = """
$(location :convert_binary_to_cc_source) \
--input_binary_file $(location :gpu_compatibility.bin) \
--output_header_file $(location :gpu_compatibility_binary.h) \
--output_source_file $(location :gpu_compatibility_binary.cc) \
--array_variable_name g_tflite_acceleration_gpu_compatibility_binary
""",
tools = [":convert_binary_to_cc_source"],
)
cc_library(
name = "android_info",
srcs = ["android_info.cc"],
hdrs = ["android_info.h"],
deps = [
"@com_google_absl//absl/status",
],
)
cc_library(
name = "gpu_compatibility",
srcs = [
"gpu_compatibility.cc",
"gpu_compatibility_binary.cc",
"gpu_compatibility_binary.h",
],
hdrs = [
"gpu_compatibility.h",
],
deps = [
":canonicalize_value",
":android_info",
":database_fbs",
":devicedb",
"@com_google_absl//absl/status",
"@com_google_absl//absl/strings",
"@flatbuffers",
"//tensorflow/lite/delegates/gpu:delegate",
"//tensorflow/lite/delegates/gpu/common:gpu_info",
] + tflite_extra_gles_deps(),
)
cc_test(
name = "gpu_compatibility_test",
srcs = ["gpu_compatibility_test.cc"],
tags = tf_gpu_tests_tags() + [
"no_cuda_asan", # TODO(b/181032551).
],
deps = [
":devicedb_sample",
":gpu_compatibility",
"@com_google_googletest//:gtest_main",
],
)
tflite_portable_test_suite()
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/BUILD | Starlark | apache-2.0 | 5,654 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include <iostream>
#include <string>
#include "absl/status/status.h"
#ifdef __ANDROID__
#include <sys/system_properties.h>
#endif // __ANDROID__
namespace {
// Reads the named Android system property. On non-Android builds this always
// returns the empty string.
// NOTE(review): assumes __system_property_get NUL-terminates `value` even when
// the property is unset (bionic does) — confirm for the targeted NDK level.
std::string GetPropertyValue(const std::string& property) {
#ifdef __ANDROID__
  char value[PROP_VALUE_MAX];
  __system_property_get(property.c_str(), value);
  return std::string(value);
#else   // !__ANDROID__
  return std::string();
#endif  // __ANDROID__
}
} // namespace
namespace tflite {
namespace acceleration {
// Fills `info_out` with device identity read from system properties, plus a
// heuristic emulator flag. Returns InvalidArgumentError only for a null
// `info_out`; otherwise OkStatus (unset properties come back as empty
// strings via GetPropertyValue).
absl::Status RequestAndroidInfo(AndroidInfo* info_out) {
  if (!info_out) {
    return absl::InvalidArgumentError("info_out may not be null");
  }
  info_out->android_sdk_version = GetPropertyValue("ro.build.version.sdk");
  info_out->device = GetPropertyValue("ro.product.device");
  info_out->model = GetPropertyValue("ro.product.model");
  info_out->manufacturer = GetPropertyValue("ro.product.manufacturer");
#ifdef __ANDROID__
  // Emulator heuristic based on
  // https://github.com/flutter/plugins/blob/master/packages/device_info/device_info/android/src/main/java/io/flutter/plugins/deviceinfo/MethodCallHandlerImpl.java
  // extended with qemu detection (ro.kernel.qemu) and the QUMA case where all
  // system properties return empty (hence the android_sdk_version.empty()
  // clause below).
  std::string brand = GetPropertyValue("ro.product.brand");
  const std::string& device = info_out->device;
  std::string fingerprint = GetPropertyValue("ro.build.fingerprint");
  std::string hardware = GetPropertyValue("ro.hardware");
  const std::string& model = info_out->model;
  const std::string& manufacturer = info_out->manufacturer;
  std::string product = GetPropertyValue("ro.build.product");
  std::string ro_kernel_qemu = GetPropertyValue("ro.kernel.qemu");
  // Any single match marks the device as an emulator.
  info_out->is_emulator =
      ((brand.find("generic") == 0 && device.find("generic") == 0) ||  // NOLINT
       fingerprint.find("generic") == 0 ||                     // NOLINT
       fingerprint.find("unknown") == 0 ||                     // NOLINT
       hardware.find("goldfish") != std::string::npos ||       // NOLINT
       hardware.find("ranchu") != std::string::npos ||         // NOLINT
       model.find("google_sdk") != std::string::npos ||        // NOLINT
       model.find("Emulator") != std::string::npos ||          // NOLINT
       model.find("Android SDK built for x86") !=              // NOLINT
           std::string::npos ||                                // NOLINT
       manufacturer.find("Genymotion") != std::string::npos ||  // NOLINT
       product.find("sdk_google") != std::string::npos ||      // NOLINT
       product.find("google_sdk") != std::string::npos ||      // NOLINT
       product.find("sdk") != std::string::npos ||             // NOLINT
       product.find("sdk_x86") != std::string::npos ||         // NOLINT
       product.find("vbox86p") != std::string::npos ||         // NOLINT
       product.find("emulator") != std::string::npos ||        // NOLINT
       product.find("simulator") != std::string::npos ||       // NOLINT
       ro_kernel_qemu == "1" ||                                // NOLINT
       info_out->android_sdk_version.empty());                 // NOLINT
#else
  // Off-device builds are by definition not the Android emulator.
  info_out->is_emulator = false;
#endif
  return absl::OkStatus();
}
} // namespace acceleration
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/android_info.cc | C++ | apache-2.0 | 4,109 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
#include <string>
#include "absl/status/status.h"
namespace tflite {
namespace acceleration {
// Information about an Android device, used for determining compatibility
// status.
struct AndroidInfo {
  // Property ro.build.version.sdk
  std::string android_sdk_version;
  // Property ro.product.model
  std::string model;
  // Property ro.product.device
  std::string device;
  // Property ro.product.manufacturer
  std::string manufacturer;
  // Whether code is running on an emulator. Default-initialized to false so
  // the flag is never read indeterminate if RequestAndroidInfo() is skipped
  // or fails early (previously it was left uninitialized).
  bool is_emulator = false;
};
absl::Status RequestAndroidInfo(AndroidInfo* info_out);
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_ANDROID_INFO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/android_info.h | C++ | apache-2.0 | 1,539 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include "absl/algorithm/container.h"
#include "absl/strings/ascii.h"
#include "absl/strings/string_view.h"
namespace tflite::acceleration {
namespace {
// Maps one byte for canonicalization: ' ' and '-' become '_', letters are
// lowercased, everything else passes through unchanged.
inline char ascii_normalise(const unsigned char c) {
  return (c == ' ' || c == '-') ? '_' : absl::ascii_tolower(c);
}
} // namespace
// Builds the canonical form of `input` by normalising each byte in turn.
std::string CanonicalizeValue(absl::string_view input) {
  std::string canonical;
  canonical.reserve(input.size());
  for (const unsigned char ch : input) {
    canonical.push_back(ascii_normalise(ch));
  }
  return canonical;
}
} // namespace tflite::acceleration
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.cc | C++ | apache-2.0 | 1,339 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
#include <string>
#include "absl/strings/string_view.h"
namespace tflite::acceleration {
// Normalises the given ASCII input by converting all alphabetic characters
// to lower case and replacing ' ' and '-' with '_'.
std::string CanonicalizeValue(absl::string_view input);
} // namespace tflite::acceleration
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_CANONICALIZE_VALUE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h | C++ | apache-2.0 | 1,234 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include <string>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
namespace tflite::acceleration {
namespace {
// Letters are lowercased regardless of original case.
TEST(CanonicalizeValue, CharactersAreLowercased) {
  EXPECT_EQ(CanonicalizeValue("hElLo"), "hello");
}
// '-' maps to '_'.
TEST(CanonicalizeValue, HyphensAreReplaced) {
  EXPECT_EQ(CanonicalizeValue("-"), "_");
}
// ' ' maps to '_'.
TEST(CanonicalizeValue, SpacesAreReplaced) {
  EXPECT_EQ(CanonicalizeValue(" "), "_");
}
// Non-letter, non-space, non-hyphen ASCII (codes 0..64) passes through
// unchanged.
TEST(CanonicalizeValue, OtherSpecialCharactersAreUnaffected) {
  for (unsigned char c = 0; c < 65; ++c) {
    if (c == ' ' || c == '-') continue;
    // Use the fill constructor to build a ONE-character string. The previous
    // `std::string s = {1, static_cast<char>(c)};` was initializer-list
    // construction and silently produced the two-character string "\x01<c>".
    std::string s(1, static_cast<char>(c));
    EXPECT_EQ(CanonicalizeValue(s), s);
  }
}
} // namespace
} // namespace tflite::acceleration
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value_test.cc | C++ | apache-2.0 | 1,451 |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple script to convert binary file to C++ source code for embedding."""
# This is a version of //tensorflow/lite/python/convert_file_to_c_source.py
# with minimal dependencies to reduce build times. See b/158254039.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import datetime
import sys
# Cribbed from //tensorflow/lite/python/util.py
# Changed:
# - Alignment from 4 to 16 for generality (16 can be required for SIMD)
# - Added 'extern' to source for building on C++ target platforms
# - Changed comments to refer to this script, and C++ rather than C
def _convert_bytes_to_cc_source(data,
                                array_name,
                                max_line_width=80,
                                include_guard=None,
                                include_path=None,
                                use_tensorflow_license=False):
  """Returns strings representing a C++ constant array containing `data`.
  Args:
    data: Byte array that will be converted into a C++ constant.
    array_name: String to use as the variable name for the constant array.
    max_line_width: The longest line length, for formatting purposes.
    include_guard: Name to use for the include guard macro definition.
    include_path: Optional path to include in the source file.
    use_tensorflow_license: Whether to include the standard TensorFlow Apache2
      license in the generated files.
  Returns:
    Text that can be compiled as a C++ source file to link in the data as a
    literal array of values.
    Text that can be used as a C++ header file to reference the literal array.
  """
  # Render the bytes as " 0xNN," tokens, wrapping a line before it would
  # exceed max_line_width (the +4 accounts for one more " 0xNN," token).
  starting_pad = " "
  array_lines = []
  array_line = starting_pad
  for value in bytearray(data):
    if (len(array_line) + 4) > max_line_width:
      array_lines.append(array_line + "\n")
      array_line = starting_pad
    array_line += " 0x%02x," % value
  # Flush the final, partially-filled line (if any bytes were emitted on it).
  if len(array_line) > len(starting_pad):
    array_lines.append(array_line + "\n")
  array_values = "".join(array_lines)
  # Derive a default include guard from the array name when none is supplied.
  if include_guard is None:
    include_guard = "TENSORFLOW_LITE_UTIL_" + array_name.upper() + "_DATA_H_"
  if include_path is not None:
    include_line = "#include \"{include_path}\"\n".format(
        include_path=include_path)
  else:
    include_line = ""
  if use_tensorflow_license:
    license_text = """
/* Copyright {year} The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
""".format(year=datetime.date.today().year)
  else:
    license_text = ""
  # Note the doubled {{ }} below: literal braces in the generated C++.
  source_template = """{license_text}
// This is a binary file that has been converted into a C++ data array using the
// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py
// script. This form is useful for compiling into a binary to simplify
// deployment on mobile devices
{include_line}
// We need to keep the data array aligned on some architectures.
#ifdef __has_attribute
#define HAVE_ATTRIBUTE(x) __has_attribute(x)
#else
#define HAVE_ATTRIBUTE(x) 0
#endif
#if HAVE_ATTRIBUTE(aligned) || (defined(__GNUC__) && !defined(__clang__))
#define DATA_ALIGN_ATTRIBUTE __attribute__((aligned(16)))
#else
#define DATA_ALIGN_ATTRIBUTE
#endif
extern const unsigned char {array_name}[] DATA_ALIGN_ATTRIBUTE = {{
{array_values}}};
extern const int {array_name}_len = {array_length};
"""
  source_text = source_template.format(
      array_name=array_name,
      array_length=len(data),
      array_values=array_values,
      license_text=license_text,
      include_line=include_line)
  header_template = """
{license_text}
// This is a binary file that has been converted into a C++ data array using the
// //tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py
// script. This form is useful for compiling into a binary to simplify
// deployment on mobile devices
#ifndef {include_guard}
#define {include_guard}
extern const unsigned char {array_name}[];
extern const int {array_name}_len;
#endif  // {include_guard}
"""
  header_text = header_template.format(
      array_name=array_name,
      include_guard=include_guard,
      license_text=license_text)
  return source_text, header_text
def main():
  """Parses flags, converts the input binary, and writes the .cc/.h pair.

  Fix: the help strings for --array_variable_name and --output_source_file
  were previously swapped (each described the other flag).
  """
  parser = argparse.ArgumentParser(
      description=("Binary to C++ source converter"))
  parser.add_argument(
      "--input_binary_file",
      type=str,
      help="Full filepath of input binary.",
      required=True)
  parser.add_argument(
      "--output_header_file",
      type=str,
      help="Full filepath of output header.",
      required=True)
  parser.add_argument(
      "--array_variable_name",
      type=str,
      help="Name of global variable that will contain the binary data.",
      required=True)
  parser.add_argument(
      "--output_source_file",
      type=str,
      help="Full filepath of output source.",
      required=True)
  flags, _ = parser.parse_known_args(args=sys.argv[1:])
  # Read the binary payload and emit it as an embedded C++ array.
  with open(flags.input_binary_file, "rb") as input_handle:
    input_data = input_handle.read()
  source, header = _convert_bytes_to_cc_source(
      data=input_data,
      array_name=flags.array_variable_name,
      use_tensorflow_license=True)
  with open(flags.output_source_file, "w") as source_handle:
    source_handle.write(source)
  with open(flags.output_header_file, "w") as header_handle:
    header_handle.write(header)
if __name__ == "__main__":
main()
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/convert_binary_to_cc_source.py | Python | apache-2.0 | 6,726 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <map>
#include <string>
#include <vector>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
namespace {
// Returns the edges of `root` that match `value`: for Comparison_EQUAL at
// most one edge (exact key lookup), otherwise every edge whose value is <=
// `value` (a "minimum requirement" match).
std::vector<const DeviceDecisionTreeEdge*> Find(
    const DeviceDecisionTreeNode* root, const std::string& value) {
  std::vector<const DeviceDecisionTreeEdge*> found;
  if (root->comparison() == Comparison_EQUAL) {
    // Exact match.
    const DeviceDecisionTreeEdge* possible =
        root->items()->LookupByKey(value.c_str());
    if (possible) {
      found.push_back(possible);
    }
  } else {
    // Minimum: value should be at least item's value.
    // NOTE(review): this is a lexicographic std::string comparison; it only
    // matches numeric ordering when the strings have equal length (e.g. "9"
    // compares greater than "10") — confirm the database stores values in a
    // form where this is intended.
    for (const DeviceDecisionTreeEdge* item : *(root->items())) {
      if (value >= item->value()->str()) {
        found.push_back(item);
      }
    }
  }
  return found;
}
// Copies each derived property of `item` (if any) into `variable_values`,
// overwriting any existing entry for the same variable.
void UpdateVariablesFromDeviceDecisionTreeEdges(
    std::map<std::string, std::string>* variable_values,
    const DeviceDecisionTreeEdge& item) {
  const auto* derived = item.derived_properties();
  if (derived == nullptr) return;
  for (const DerivedProperty* property : *derived) {
    (*variable_values)[property->variable()->str()] = property->value()->str();
  }
}
// Recursively evaluates one decision-tree node: if the variable the node
// tests has a known value, applies the derived properties of every matching
// edge and then descends into each edge's child nodes.
void Follow(const DeviceDecisionTreeNode* root,
            std::map<std::string, std::string>* variable_values) {
  // A node without a variable cannot be evaluated.
  if (!root->variable()) {
    return;
  }
  auto possible_value = variable_values->find(root->variable()->str());
  if (possible_value == variable_values->end()) {
    // The tested variable is unknown (not set and not yet derived): prune.
    return;
  }
  std::vector<const DeviceDecisionTreeEdge*> edges =
      Find(root, possible_value->second);
  for (const DeviceDecisionTreeEdge* edge : edges) {
    // Derived properties are applied before visiting children, so child
    // nodes may test variables derived on this edge.
    UpdateVariablesFromDeviceDecisionTreeEdges(variable_values, *edge);
    if (edge->children()) {
      for (const DeviceDecisionTreeNode* root : *(edge->children())) {
        Follow(root, variable_values);
      }
    }
  }
}
} // namespace
// Evaluates every root of the decision forest in `database` against (and
// into) `variable_values`.
void UpdateVariablesFromDatabase(
    std::map<std::string, std::string>* variable_values,
    const DeviceDatabase& database) {
  const auto* roots = database.root();
  if (roots == nullptr) return;
  for (const DeviceDecisionTreeNode* node : *roots) {
    Follow(node, variable_values);
  }
}
} // namespace acceleration
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/devicedb.cc | C++ | apache-2.0 | 2,926 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Include guard fixed to match the file name (devicedb.h); the previous
// ..._DECISION_TREE_H_ guard did not follow the path-based convention and
// could collide with a real decision_tree.h in this directory.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DEVICEDB_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DEVICEDB_H_
#include <map>
#include <string>
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
namespace tflite {
namespace acceleration {
// Use the variables in `variable_values` to evaluate the decision tree in
// `database` and update the `variable_values` based on derived properties in
// the decision tree.
//
// See database.fbs for a description of the decision tree.
void UpdateVariablesFromDatabase(
    std::map<std::string, std::string>* variable_values,
    const DeviceDatabase& database);
}  // namespace acceleration
}  // namespace tflite
#endif  // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_DEVICEDB_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/devicedb.h | C++ | apache-2.0 | 1,504 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include <memory>
#include <string>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
#include "tensorflow/lite/testing/util.h"
namespace tflite {
namespace acceleration {
namespace {
// Fixture that decodes the sample device database embedded by the
// devicedb-sample_cc genrule into a flatbuffer root pointer for the tests.
class DeviceDbTest : public ::testing::Test {
 protected:
  // Points device_db_ at the in-binary sample database (no copy is made).
  void LoadSample() {
    device_db_ = flatbuffers::GetRoot<DeviceDatabase>(
        g_tflite_acceleration_devicedb_sample_binary);
  }
  const DeviceDatabase* device_db_ = nullptr;
};
TEST_F(DeviceDbTest, Load) {
LoadSample();
ASSERT_TRUE(device_db_);
ASSERT_TRUE(device_db_->root());
EXPECT_EQ(device_db_->root()->size(), 3);
}
TEST_F(DeviceDbTest, SocLookup) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
// Find first device mapping.
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7872");
// Find second device mapping.
variables.clear();
variables[kDeviceModel] = "sc_02l";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[kSoCModel], "exynos_7885");
// Make sure no results are returned without a match.
variables.clear();
variables[kDeviceModel] = "nosuch";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(kSoCModel), variables.end());
}
TEST_F(DeviceDbTest, StatusLookupWithSoC) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
// Find exact match.
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7872";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
// Ensure no results without a match.
variables[kOpenGLESVersion] = "3.0";
variables.erase(variables.find(gpu::kStatus));
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
// Find no results with too low an android version.
variables.clear();
variables[kOpenGLESVersion] = "3.1";
variables[kSoCModel] = "exynos_7883";
variables[kAndroidSdkVersion] = "24";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables.find(gpu::kStatus), variables.end());
// Find a match with android version above minimum.
variables[kAndroidSdkVersion] = "29";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupWithDevice) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
// Find unsupported device (same model, different device).
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810f";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusUnsupported);
// Find supported device (same model, different device).
variables.clear();
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "sm_j810m";
variables[kDeviceName] = "j8y18lte";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
TEST_F(DeviceDbTest, StatusLookupBasedOnDerivedProperties) {
LoadSample();
ASSERT_TRUE(device_db_);
std::map<std::string, std::string> variables;
// Find status based on SoC derived from model.
variables[kOpenGLESVersion] = "3.1";
variables[kAndroidSdkVersion] = "24";
variables[kDeviceModel] = "m712c";
UpdateVariablesFromDatabase(&variables, *device_db_);
EXPECT_EQ(variables[gpu::kStatus], gpu::kStatusSupported);
}
} // namespace
} // namespace acceleration
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/devicedb_test.cc | C++ | apache-2.0 | 4,692 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.h"
#include <cctype>
#include <map>
#include <memory>
#include <string>
#include "absl/strings/string_view.h"
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/compatibility/canonicalize_value.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/database_generated.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility_binary.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/variables.h"
namespace tflite {
namespace acceleration {
namespace {
// Normalizes every value in the map in place (keys are left untouched), so
// that subsequent database lookups see canonical forms.
void CanonicalizeValues(std::map<std::string, std::string>* variable_values) {
  for (auto& entry : *variable_values) {
    entry.second = CanonicalizeValue(entry.second);
  }
}
} // namespace
// Wraps the given flatbuffer (not owned, must outlive this object) as a
// DeviceDatabase. On null input the database stays null.
//
// Bug fix: database_ has no default initializer in the header, so the early
// return on a null buffer previously left it uninitialized; reading it later
// (e.g. `if (!database_)` in CalculateVariables) was undefined behavior.
GPUCompatibilityList::GPUCompatibilityList(
    const unsigned char* compatibility_list_flatbuffer)
    : database_(nullptr) {
  if (!compatibility_list_flatbuffer) return;
  database_ =
      flatbuffers::GetRoot<DeviceDatabase>(compatibility_list_flatbuffer);
}
// Builds a list from the compatibility flatbuffer compiled into this binary.
// Delegates to the validating overload, so this can still return nullptr if
// the bundled data fails verification.
std::unique_ptr<GPUCompatibilityList> GPUCompatibilityList::Create() {
  return Create(g_tflite_acceleration_gpu_compatibility_binary,
                g_tflite_acceleration_gpu_compatibility_binary_len);
}
// Builds a list from caller-supplied flatbuffer bytes. Returns nullptr when
// the buffer is null or fails schema verification.
std::unique_ptr<GPUCompatibilityList> GPUCompatibilityList::Create(
    const unsigned char* compatibility_list_flatbuffer, int length) {
  const bool usable =
      compatibility_list_flatbuffer != nullptr &&
      IsValidFlatbuffer(compatibility_list_flatbuffer, length);
  if (!usable) return nullptr;
  // The constructor is private, so std::make_unique cannot be used here.
  return std::unique_ptr<GPUCompatibilityList>(
      new GPUCompatibilityList(compatibility_list_flatbuffer));
}
// Collects device/GPU properties into the variable map used for database
// queries (see variables.h), canonicalizes them, and — when a database is
// loaded — augments them with derived variables such as gpu::kStatus.
std::map<std::string, std::string> GPUCompatibilityList::CalculateVariables(
    const AndroidInfo& android_info,
    const ::tflite::gpu::GpuInfo& gpu_info) const {
  std::map<std::string, std::string> variables;
  variables[kAndroidSdkVersion] = android_info.android_sdk_version;
  variables[kDeviceModel] = android_info.model;
  variables[kDeviceName] = android_info.device;
  variables[kManufacturer] = android_info.manufacturer;
  const auto& gl_info = gpu_info.opengl_info;
  variables[kGPUModel] = gl_info.renderer_name;
  // Format the GLES version as "major.minor". snprintf always NUL-terminates
  // within the given size, so pass the full buffer size and drop the manual
  // termination. (The old code passed `128 - 1` and then wrote `buffer[len]`
  // with snprintf's unclamped return value, which would index out of bounds
  // if the output were ever truncated.)
  char buffer[128];
  snprintf(buffer, sizeof(buffer), "%d.%d", gl_info.major_version,
           gl_info.minor_version);
  variables[kOpenGLESVersion] = std::string(buffer);
  CanonicalizeValues(&variables);
  if (!database_) return variables;
  UpdateVariablesFromDatabase(&variables, *database_);
  return variables;
}
// A device is "included" iff the derived status variable equals
// gpu::kStatusSupported; an empty or UNSUPPORTED status means unsupported.
bool GPUCompatibilityList::Includes(
    const AndroidInfo& android_info,
    const ::tflite::gpu::GpuInfo& gpu_info) const {
  std::map<std::string, std::string> variables =
      CalculateVariables(android_info, gpu_info);
  return variables[gpu::kStatus] == std::string(gpu::kStatusSupported);
}
// Returns delegate options for the given device. Currently ignores both
// arguments and always returns the defaults; the parameters exist so the
// signature stays stable once the database carries per-device options.
TfLiteGpuDelegateOptionsV2 GPUCompatibilityList::GetBestOptionsFor(
    const AndroidInfo& /* android_info */,
    const ::tflite::gpu::GpuInfo& /* gpu_info */) const {
  // This method is for forwards-compatibility: the list may later include
  // information about which backend to choose (OpenGL/OpenCL/Vulkan) or other
  // options.
  return TfLiteGpuDelegateOptionsV2Default();
}
// static
// Verifies that data/len holds a structurally valid DeviceDatabase
// flatbuffer. Structural check only; does not inspect database contents.
bool GPUCompatibilityList::IsValidFlatbuffer(const unsigned char* data,
                                             int len) {
  // Verify opensource db.
  flatbuffers::Verifier verifier(reinterpret_cast<const uint8_t*>(data), len);
  return tflite::acceleration::VerifyDeviceDatabaseBuffer(verifier);
}
} // namespace acceleration
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.cc | C++ | apache-2.0 | 4,329 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
#include <map>
#include <memory>
#include <string>
#include "tensorflow/lite/delegates/gpu/common/gpu_info.h"
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/android_info.h"
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb.h"
namespace tflite {
namespace acceleration {
// This class provides information on GPU delegate support.
//
// The GPU delegate is supported on a subset of Android devices, depending on
// Android version, OpenGL ES version, GPU chipset etc. The support is based on
// measured stability, correctness and performance. For more detail see
// README.md.
//
// Example usage:
// tflite::Interpreter* interpreter = ... ;
// tflite::acceleration::AndroidInfo android_info;
// tflite::gpu::GpuInfo gpu_info;
// EXPECT_OK(tflite::acceleration::RequestAndroidInfo(&android_info));
// EXPECT_OK(tflite::gpu::gl::EglEnvironment::NewEglEnvironment(&env));
// EXPECT_OK(tflite::gpu::gl::RequestGpuInfo(&tflite_gpu_info));
// tflite::acceleration::GPUCompatibilityList list;
// TfLiteDelegate* gpu_delegate = nullptr;
// TfLiteGpuDelegateOptions gpu_options;
// if (list.Includes(android_info, gpu_info)) {
// gpu_options = list.BestOptionsFor(android_info, gpu_info);
// gpu_delegate = TfLiteGpuDelegateCreate(&gpu_options);
// EXPECT_EQ(interpreter->ModifyGraphWithDelegate(gpu_delegate), TfLiteOk);
// } else {
// // Fallback path.
// }
// Immutable GPU-delegate compatibility lookup backed by a DeviceDatabase
// flatbuffer. Instances are created via the static Create() factories.
class GPUCompatibilityList {
 public:
  // Construct list from bundled data. Returns a unique_ptr to a nullptr if
  // creation fails.
  static std::unique_ptr<GPUCompatibilityList> Create();
  // Constructs list from the given flatbuffer data. Returns a unique_ptr to a
  // nullptr if the given flatbuffer is empty or invalid.
  static std::unique_ptr<GPUCompatibilityList> Create(
      const unsigned char* compatibility_list_flatbuffer, int length);
  // Returns true if the provided device specs are supported by the database.
  bool Includes(const AndroidInfo& android_info,
                const ::tflite::gpu::GpuInfo& gpu_info) const;
  // Returns the best TfLiteGpuDelegateOptionsV2 for the provided device specs
  // based on the database. The output can be modified as desired before passing
  // to delegate creation.
  TfLiteGpuDelegateOptionsV2 GetBestOptionsFor(
      const AndroidInfo& android_info,
      const ::tflite::gpu::GpuInfo& gpu_info) const;
  // Convert android_info and gpu_info into a set of variables used for querying
  // the list, and update variables from list data. See variables.h
  // and devicedb.h for more information.
  std::map<std::string, std::string> CalculateVariables(
      const AndroidInfo& android_info,
      const ::tflite::gpu::GpuInfo& gpu_info) const;
  GPUCompatibilityList(const GPUCompatibilityList&) = delete;
  GPUCompatibilityList& operator=(const GPUCompatibilityList&) = delete;
  // Checks if the provided byte array represents a valid compatibility list
  // flatbuffer.
  static bool IsValidFlatbuffer(const unsigned char* data, int len);

 protected:
  // Points into the flatbuffer passed at construction; not owned.
  // NOTE(review): no default initializer here and the constructor returns
  // early on a null buffer -- confirm it is null-initialized on that path.
  const DeviceDatabase* database_;

 private:
  explicit GPUCompatibilityList(
      const unsigned char* compatibility_list_flatbuffer);
};
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_GPU_COMPATIBILITY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.h | C++ | apache-2.0 | 4,219 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility.h"
#include <algorithm>
#include <cstddef>
#include <memory>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "tensorflow/lite/experimental/acceleration/compatibility/devicedb-sample.h"
namespace {
// Fixture that loads the sample device database bundled with the tests.
class GPUCompatibilityTest : public ::testing::Test {
 protected:
  GPUCompatibilityTest() {
    // Create() returns nullptr on invalid data, so each test asserts on
    // list_ before using it.
    list_ = tflite::acceleration::GPUCompatibilityList::Create(
        g_tflite_acceleration_devicedb_sample_binary,
        g_tflite_acceleration_devicedb_sample_binary_len);
  }
  std::unique_ptr<tflite::acceleration::GPUCompatibilityList> list_;
};
// A device/GLES combination listed as supported in the sample database must
// be reported as included.
TEST_F(GPUCompatibilityTest, ReturnsSupportedForFullMatch) {
  ASSERT_TRUE(list_ != nullptr);
  tflite::acceleration::AndroidInfo android_info = {.android_sdk_version = "24",
                                                    .model = "m712c"};
  tflite::gpu::GpuInfo tflite_gpu_info;
  tflite_gpu_info.opengl_info.major_version = 3;
  tflite_gpu_info.opengl_info.minor_version = 1;
  EXPECT_TRUE(list_->Includes(android_info, tflite_gpu_info));
}
// A fully specified device that the sample database marks unsupported must
// not be reported as included.
TEST_F(GPUCompatibilityTest, ReturnsUnsupportedForFullMatch) {
  ASSERT_TRUE(list_ != nullptr);
  tflite::acceleration::AndroidInfo android_info = {.android_sdk_version = "28",
                                                    .model = "SM-G960F",
                                                    .device = "starlte",
                                                    .manufacturer = "Samsung"};
  tflite::gpu::GpuInfo tflite_gpu_info;
  tflite_gpu_info.opengl_info.renderer_name = "Mali-G72";
  tflite_gpu_info.opengl_info.major_version = 3;
  tflite_gpu_info.opengl_info.minor_version = 2;
  EXPECT_FALSE(list_->Includes(android_info, tflite_gpu_info));
}
// GetBestOptionsFor currently always returns the delegate defaults; compare
// every field against TfLiteGpuDelegateOptionsV2Default() to pin that down.
TEST_F(GPUCompatibilityTest, ReturnsDefaultOptions) {
  ASSERT_TRUE(list_ != nullptr);
  tflite::acceleration::AndroidInfo android_info;
  tflite::gpu::GpuInfo tflite_gpu_info;
  auto default_options = TfLiteGpuDelegateOptionsV2Default();
  auto best_options = list_->GetBestOptionsFor(android_info, tflite_gpu_info);
  EXPECT_EQ(best_options.is_precision_loss_allowed,
            default_options.is_precision_loss_allowed);
  EXPECT_EQ(best_options.inference_preference,
            default_options.inference_preference);
  EXPECT_EQ(best_options.inference_priority1,
            default_options.inference_priority1);
  EXPECT_EQ(best_options.inference_priority2,
            default_options.inference_priority2);
  EXPECT_EQ(best_options.inference_priority3,
            default_options.inference_priority3);
  EXPECT_EQ(best_options.experimental_flags,
            default_options.experimental_flags);
  EXPECT_EQ(best_options.max_delegated_partitions,
            default_options.max_delegated_partitions);
}
// The bundled sample database must pass flatbuffer verification.
TEST(GPUCompatibility, RecogniseValidCompatibilityListFlatbuffer) {
  EXPECT_TRUE(tflite::acceleration::GPUCompatibilityList::IsValidFlatbuffer(
      g_tflite_acceleration_devicedb_sample_binary,
      g_tflite_acceleration_devicedb_sample_binary_len));
}
// A buffer of garbage bytes must fail flatbuffer verification.
TEST(GPUCompatibility, RecogniseInvalidCompatibilityListFlatbuffer) {
  unsigned char invalid_buffer[100];
  std::fill(invalid_buffer, invalid_buffer + 100, ' ');
  EXPECT_FALSE(tflite::acceleration::GPUCompatibilityList::IsValidFlatbuffer(
      invalid_buffer, 100));
}
// Create() must reject invalid data and return nullptr instead of a list.
TEST(GPUCompatibility, CreationWithInvalidCompatibilityListFlatbuffer) {
  unsigned char invalid_buffer[10];
  std::fill(invalid_buffer, invalid_buffer + 10, ' ');
  std::unique_ptr<tflite::acceleration::GPUCompatibilityList> list =
      tflite::acceleration::GPUCompatibilityList::Create(invalid_buffer, 10);
  EXPECT_EQ(list, nullptr);
}
// Create() must tolerate a null buffer and return nullptr without crashing.
TEST(GPUCompatibility, CreationWithNullCompatibilityListFlatbuffer) {
  std::unique_ptr<tflite::acceleration::GPUCompatibilityList> list =
      tflite::acceleration::GPUCompatibilityList::Create(nullptr, 0);
  EXPECT_EQ(list, nullptr);
}
} // namespace
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/gpu_compatibility_test.cc | C++ | apache-2.0 | 4,577 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Simple program to convert from JSON to binary flatbuffers for given schema.
//
// Used for creating the binary version of a compatibility list.
//
// The flatc command line is not available in all build environments.
#include <fstream>
#include <iostream>
#include <string>
#include <vector>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "flatbuffers/idl.h" // from @flatbuffers
#include "flatbuffers/reflection.h" // from @flatbuffers
#include "flatbuffers/reflection_generated.h" // from @flatbuffers
#include "flatbuffers/util.h" // from @flatbuffers
#include "tensorflow/lite/tools/command_line_flags.h"
// Converts a JSON file to a binary flatbuffer given a .fbs schema.
// Returns 0 on success; each failure mode gets a distinct non-zero code.
int main(int argc, char** argv) {
  std::string json_path, fbs_path, fb_path;
  std::vector<tflite::Flag> flags = {
      tflite::Flag::CreateFlag("json_input", &json_path,
                               "Path to input json file."),
      tflite::Flag::CreateFlag("fbs", &fbs_path,
                               "Path to flatbuffer schema to use."),
      tflite::Flag::CreateFlag("fb_output", &fb_path,
                               "Path to a output binary flatbuffer."),
  };
  const bool parse_result =
      tflite::Flags::Parse(&argc, const_cast<const char**>(argv), flags);
  if (!parse_result || json_path.empty() || fbs_path.empty() ||
      fb_path.empty()) {
    std::cerr << tflite::Flags::Usage(argv[0], flags);
    return 1;
  }
  std::string json_contents;
  if (!flatbuffers::LoadFile(json_path.c_str(), false, &json_contents)) {
    std::cerr << "Unable to load file " << json_path << std::endl;
    return 2;
  }
  std::string fbs_contents;
  if (!flatbuffers::LoadFile(fbs_path.c_str(), false, &fbs_contents)) {
    std::cerr << "Unable to load file " << fbs_path << std::endl;
    return 3;
  }
  const char* include_directories[] = {nullptr};
  // Parse the schema text and serialize it into reflection form so that
  // CopyTable() below can walk the table structure generically.
  flatbuffers::Parser schema_parser;
  if (!schema_parser.Parse(fbs_contents.c_str(), include_directories)) {
    std::cerr << "Unable to parse schema " << schema_parser.error_ << std::endl;
    return 4;
  }
  schema_parser.Serialize();
  auto schema =
      reflection::GetSchema(schema_parser.builder_.GetBufferPointer());
  auto root_table = schema->root_table();
  flatbuffers::Parser parser;
  parser.Deserialize(schema_parser.builder_.GetBufferPointer(),
                     schema_parser.builder_.GetSize());
  if (!parser.Parse(json_contents.c_str(), include_directories,
                    json_path.c_str())) {
    std::cerr << "Unable to parse json " << parser.error_ << std::endl;
    return 5;
  }
  // Use CopyTable() to deduplicate the strings.
  const uint8_t* buffer = parser.builder_.GetBufferPointer();
  flatbuffers::FlatBufferBuilder fbb;
  auto root_offset = flatbuffers::CopyTable(
      fbb, *schema, *root_table, *flatbuffers::GetAnyRoot(buffer), true);
  fbb.Finish(root_offset);
  // Open in binary mode: the output is raw flatbuffer bytes, and text-mode
  // newline translation (e.g. on Windows) would corrupt it. Also check that
  // the stream opened and that the write succeeded instead of silently
  // producing a truncated or missing file.
  std::ofstream output(fb_path, std::ios::binary);
  if (!output) {
    std::cerr << "Unable to open output file " << fb_path << std::endl;
    return 6;
  }
  output.write(reinterpret_cast<const char*>(fbb.GetBufferPointer()),
               fbb.GetSize());
  output.close();
  if (!output) {
    std::cerr << "Failed to write output file " << fb_path << std::endl;
    return 7;
  }
  return 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/json_to_fb.cc | C++ | apache-2.0 | 3,698 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
// This file lists generally useful compatibility properties.
// Properties starting with "tflite." are reserved.
// Users of the compatibility library can use arbitrary other property names.
namespace tflite {
namespace acceleration {
// System properties, not specific to any single delegate.
// Android properties.
//
// Android SDK version number. Android system property ro.build.version.sdk.
// E.g., "28".
constexpr char kAndroidSdkVersion[] = "tflite.android_sdk_version";
// SoC model. Looked up from database or possibly returned from Android system
// property ro.board.platform, normalized. E.g., "sdm450".
constexpr char kSoCModel[] = "tflite.soc_model";
// SoC vendor. Looked up from database. E.g., "qualcomm".
constexpr char kSoCVendor[] = "tflite.soc_vendor";
// Device manufacturer. Android API android.os.Build.MANUFACTURER, normalized.
// E.g., "google".
constexpr char kManufacturer[] = "tflite.manufacturer";
// Device model. Android API android.os.Build.MODEL, normalized.
// E.g., "pixel_2".
constexpr char kDeviceModel[] = "tflite.device_model";
// Device name. Android API android.os.Build.DEVICE, normalized.
// E.g., "walleye".
constexpr char kDeviceName[] = "tflite.device_name";
// GPU-related properties.
//
// OpenGL ES version. E.g., 3.2.
constexpr char kOpenGLESVersion[] = "tflite.opengl_es_version";
// GPU model, result of querying GL_RENDERER, normalized. E.g.,
// "adreno_(tm)_505".
constexpr char kGPUModel[] = "tflite.gpu_model";
// GPU vendor, normalized. E.g., "qualcomm".
constexpr char kGPUVendor[] = "tflite.gpu_vendor";
// OpenGL driver version, result of querying GL_VERSION. E.g.,
// "opengl_es_3.2_v@328.0_(git@6fb5a5b,_ife855c4895)_(date:08/21/18)"
constexpr char kOpenGLDriverVersion[] = "tflite.opengl_driver_version";
// NNAPI-related properties.
//
// NNAPI accelerator name, returned by ANeuralNetworksDevice_getName. E.g.,
// "qti-dsp".
constexpr char kNNAPIAccelerator[] = "tflite.nnapi_accelerator";
// NNAPI accelerator feature level, returned by
// ANeuralNetworksDevice_getFeatureLevel. E.g., 29. Actual variables are named
// "tflite.nnapi_feature_level.<accelerator name>", e.g.,
// "tflite.nnapi_feature_level.qti-dsp".
constexpr char kNNAPIFeatureLevelPrefix[] = "tflite.nnapi_feature_level";
namespace gpu {
// GPU-delegate derived properties.
// Whether the GPU delegate works in general.
// Possible values are ("", "SUPPORTED", "UNSUPPORTED"). An empty value for
// this field means that the device is unsupported.
constexpr char kStatus[] = "tflite.gpu.status";
constexpr char kStatusSupported[] = "SUPPORTED";
constexpr char kStatusUnsupported[] = "UNSUPPORTED";
} // namespace gpu
} // namespace acceleration
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_COMPATIBILITY_VARIABLES_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/compatibility/variables.h | C++ | apache-2.0 | 3,627 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
load("@flatbuffers//:build_defs.bzl", "DEFAULT_FLATC_ARGS", "flatbuffer_cc_library", "flatbuffer_java_library", "flatc_path")
load("//tensorflow:tensorflow.bzl", "get_compatible_with_portable")
package(
default_visibility = [
"//visibility:public",
],
licenses = ["notice"],
)
# Converts the proto definition into a flatbuffer schema using flatc.
genrule(
    name = "configuration_schema",
    srcs = ["configuration.proto"],
    outs = ["configuration.fbs"],
    # We rename the namespace since otherwise the proto classes and flatbuffer
    # classes would have the same names.
    cmd = """
    $(location {}) --proto -o $(@D) $(location :configuration.proto)
    perl -p -i -e 's/tflite.proto/tflite/' $(@D)/configuration.fbs
    """.format(flatc_path),
    compatible_with = get_compatible_with_portable(),
    tools = [
        flatc_path,
    ],
)

# Embeds the generated .fbs schema text as a C++ raw-string constant so
# binaries can access the schema source at runtime.
genrule(
    name = "configuration_fbs_contents_cc",
    srcs = ["configuration.fbs"],
    outs = ["configuration_fbs_contents-inl.h"],
    cmd = """
    echo 'constexpr char configuration_fbs_contents[] = R"Delimiter(' > $(@)
    cat < $(<) >> $(@)
    echo ')Delimiter";' >> $(@)
    """,
)
exports_files(["configuration.proto"])
proto_library(
name = "configuration_proto",
srcs = [
"configuration.proto",
],
)
cc_proto_library(
name = "configuration_cc_proto",
deps = [":configuration_proto"],
)
java_lite_proto_library(
name = "configuration_java_proto_lite",
deps = [":configuration_proto"],
)
flatbuffer_cc_library(
name = "configuration_fbs",
srcs = [":configuration.fbs"],
compatible_with = get_compatible_with_portable(),
flatc_args = DEFAULT_FLATC_ARGS + ["--gen-compare"],
)
flatbuffer_java_library(
name = "configuration_fbs_java",
srcs = [":configuration.fbs"],
)
cc_library(
name = "proto_to_flatbuffer",
srcs = [
"proto_to_flatbuffer.cc",
],
hdrs = ["proto_to_flatbuffer.h"],
deps = [
":configuration_cc_proto",
":configuration_fbs",
"//tensorflow/lite:minimal_logging",
"@flatbuffers",
],
)
cc_library(
name = "delegate_registry",
srcs = ["delegate_registry.cc"],
hdrs = ["delegate_registry.h"],
deps = [
":configuration_fbs",
"//tensorflow/lite/c:common",
"@com_google_absl//absl/synchronization",
],
)
cc_library(
name = "nnapi_plugin",
deps = [
":nnapi_plugin_impl",
],
)
cc_library(
name = "nnapi_plugin_impl",
srcs = ["nnapi_plugin.cc"],
hdrs = ["nnapi_plugin.h"],
visibility = [
"//tensorflow/lite/experimental/acceleration/configuration/c:__pkg__",
],
deps = [
":configuration_fbs",
":delegate_registry",
"//tensorflow/lite/c:common",
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
"//tensorflow/lite/experimental/acceleration/configuration/c:delegate_plugin",
"@com_google_absl//absl/memory",
],
alwayslink = 1, # For registration to always run.
)
cc_test(
name = "nnapi_plugin_test",
srcs = ["nnapi_plugin_test.cc"],
tags = [
"no_mac",
"no_windows",
"tflite_not_portable_ios",
],
deps = [
":configuration_fbs",
":delegate_registry",
":nnapi_plugin",
"//tensorflow/lite:framework",
"//tensorflow/lite/c:common",
"//tensorflow/lite/delegates/nnapi:nnapi_delegate",
"//tensorflow/lite/delegates/nnapi:nnapi_delegate_mock_test",
"//tensorflow/lite/kernels:test_util",
"@com_google_googletest//:gtest_main",
"@flatbuffers",
],
)
cc_library(
name = "hexagon_plugin",
srcs = ["hexagon_plugin.cc"],
deps = [
":configuration_fbs",
":delegate_registry",
"@com_google_absl//absl/memory",
] + select({
"//tensorflow:android": [
"//tensorflow/lite/delegates/hexagon:hexagon_delegate",
],
"//conditions:default": [],
}),
alwayslink = 1, # For registration to always run.
)
cc_library(
name = "gpu_plugin",
srcs = ["gpu_plugin.cc"],
deps = [
":configuration_fbs",
":delegate_registry",
"//tensorflow/lite/delegates/gpu:delegate",
"@com_google_absl//absl/memory",
],
alwayslink = 1, # For registration to always run.
)
cc_library(
name = "xnnpack_plugin",
srcs = ["xnnpack_plugin.cc"],
deps = [
":configuration_fbs",
":delegate_registry",
"//tensorflow/lite/delegates/xnnpack:xnnpack_delegate",
"@com_google_absl//absl/memory",
],
alwayslink = 1, # For registration to always run.
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/BUILD | Starlark | apache-2.0 | 5,313 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# C API for delegate plugins.
load("//tensorflow:tensorflow.bzl", "get_compatible_with_portable")
package(
default_visibility = ["//visibility:private"],
licenses = ["notice"],
)
# Header-only target with the C API types shared by all delegate plugins.
cc_library(
    name = "delegate_plugin",
    hdrs = ["delegate_plugin.h"],
    compatible_with = get_compatible_with_portable(),
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow/lite/c:common",
    ],
)

# C API wrapper around the C++ NNAPI delegate plugin implementation.
cc_library(
    name = "nnapi_plugin",
    srcs = ["nnapi_plugin.cc"],
    hdrs = ["nnapi_plugin.h"],
    visibility = ["//visibility:public"],
    deps = [
        ":delegate_plugin",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/delegates/nnapi:nnapi_delegate",
        "//tensorflow/lite/experimental/acceleration/configuration:configuration_fbs",
        "//tensorflow/lite/experimental/acceleration/configuration:nnapi_plugin_impl",
    ],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/c/BUILD | Starlark | apache-2.0 | 1,579 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_DELEGATE_PLUGIN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_DELEGATE_PLUGIN_H_
// C API types for TF Lite delegate plugins.
#include "tensorflow/lite/c/common.h"
#ifdef __cplusplus
extern "C" {
#endif
// Type of function to allocate and construct a delegate.
// The tflite_settings parameter should be a pointer to a FlatBuffer table
// object of type tflite::TFLiteSettings. (We use 'void *' here since this
// is a C API so we don't want to directly reference C++ types such
// as tflite::TFLiteSettings.)
typedef TfLiteDelegate *TfLiteDelegatePluginCreateFunc(
const void *tflite_settings);
// Type of function to destroy and deallocate a delegate.
// The delegate argument must have been created with the corresponding
// create function from the same delegate plugin.
typedef void TfLiteDelegatePluginDestroyFunc(TfLiteDelegate *);
// Type of function to return an error code for the last delegate operation.
// The delegate argument must have been created with the corresponding
// create function from the same delegate plugin.
typedef int TfLiteDelegatePluginGetDelegateErrnoFunc(TfLiteDelegate *);
// Struct to hold all the methods for a delegate plugin.
typedef struct TfLiteDelegatePlugin {
// Function to allocate and construct a delegate.
TfLiteDelegatePluginCreateFunc *create;
// Function to deallocate a delegate.
TfLiteDelegatePluginDestroyFunc *destroy;
// Function to return an error code for the last delegate operation.
TfLiteDelegatePluginGetDelegateErrnoFunc *get_delegate_errno;
} TfLiteDelegatePlugin;
#ifdef __cplusplus
}; // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_DELEGATE_PLUGIN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/c/delegate_plugin.h | C | apache-2.0 | 2,447 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file implements the Delegate Plugin for the NNAPI Delegate.
// It provides the TfLiteDelegatePlugin function table (create / destroy /
// get-errno) that exposes the NNAPI delegate through the C API.
#include "tensorflow/lite/experimental/acceleration/configuration/c/nnapi_plugin.h"
#include <memory>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/nnapi_plugin.h"
extern "C" {
// Allocates a StatefulNnApiDelegate configured from `settings`, which must
// point to a flatbuffer table of type tflite::TFLiteSettings. Ownership of
// the returned delegate passes to the caller; release with DestroyDelegate().
static TfLiteDelegate* CreateDelegate(const void* settings) {
  const ::tflite::TFLiteSettings* tflite_settings =
      static_cast<const ::tflite::TFLiteSettings*>(settings);
  tflite::delegates::NnapiPlugin nnapi_plugin(*tflite_settings);
  return new tflite::StatefulNnApiDelegate(nnapi_plugin.Options());
}
// Deletes a delegate previously returned by CreateDelegate(). The cast
// recovers the concrete StatefulNnApiDelegate type before deletion.
static void DestroyDelegate(TfLiteDelegate* delegate) {
  delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
}
// Returns the NNAPI errno for the delegate's most recent operation.
// `from_delegate` must have been created by CreateDelegate() above.
static int DelegateErrno(TfLiteDelegate* from_delegate) {
  auto nnapi_delegate =
      static_cast<tflite::StatefulNnApiDelegate*>(from_delegate);
  return nnapi_delegate->GetNnApiErrno();
}
// Statically allocated function table for the C API. It lives for the whole
// process, so callers must not free the pointer returned below.
static constexpr TfLiteDelegatePlugin kPluginCApi{
    CreateDelegate,
    DestroyDelegate,
    DelegateErrno,
};

// Entry point declared in nnapi_plugin.h.
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi() {
  return &kPluginCApi;
}
} // extern "C"
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/c/nnapi_plugin.cc | C++ | apache-2.0 | 1,999 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
// This header file is for the delegate plugin for NNAPI.
//
// For the C++ delegate plugin interface, the NNAPI delegate plugin is added to
// the DelegatePluginRegistry by the side effect of a constructor for a static
// object, so there's no public API needed for this plugin, other than the API
// of tflite::delegates::DelegatePluginRegistry, which is declared in
// delegate_registry.h.
//
// But to provide a C API to access the NNAPI delegate plugin, we do expose
// some functions, which are declared below.
#include "tensorflow/lite/experimental/acceleration/configuration/c/delegate_plugin.h"
#ifdef __cplusplus
extern "C" {
#endif
// C API for the NNAPI delegate plugin.
// Returns a pointer to a statically allocated table of function pointers.
const TfLiteDelegatePlugin* TfLiteNnapiDelegatePluginCApi();
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_C_NNAPI_PLUGIN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/c/nnapi_plugin.h | C | apache-2.0 | 1,786 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// automatically generated by the FlatBuffers compiler, do not modify
#ifndef FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_
#define FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_
#include "flatbuffers/flatbuffers.h"
namespace tflite {
struct ComputeSettings;
struct ComputeSettingsT;
struct NNAPISettings;
struct NNAPISettingsT;
struct GPUSettings;
struct GPUSettingsT;
struct HexagonSettings;
struct HexagonSettingsT;
struct XNNPackSettings;
struct XNNPackSettingsT;
struct EdgeTpuDeviceSpec;
struct EdgeTpuDeviceSpecT;
struct EdgeTpuInactivePowerConfig;
struct EdgeTpuInactivePowerConfigT;
struct EdgeTpuSettings;
struct EdgeTpuSettingsT;
struct CoralSettings;
struct CoralSettingsT;
struct CPUSettings;
struct CPUSettingsT;
struct TFLiteSettings;
struct TFLiteSettingsT;
struct FallbackSettings;
struct FallbackSettingsT;
struct BenchmarkMetric;
struct BenchmarkMetricT;
struct BenchmarkResult;
struct BenchmarkResultT;
struct ErrorCode;
struct ErrorCodeT;
struct BenchmarkError;
struct BenchmarkErrorT;
struct BenchmarkEvent;
struct BenchmarkEventT;
bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs);
bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs);
bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs);
bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs);
bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs);
bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs);
bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs);
bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs);
bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs);
bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs);
bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs);
bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs);
bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs);
bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs);
bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs);
bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs);
bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs);
bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs);
// Overall power/latency preference used when configuring acceleration.
enum ExecutionPreference {
  ExecutionPreference_ANY = 0,
  ExecutionPreference_LOW_LATENCY = 1,
  ExecutionPreference_LOW_POWER = 2,
  ExecutionPreference_FORCE_CPU = 3,
  ExecutionPreference_MIN = ExecutionPreference_ANY,
  ExecutionPreference_MAX = ExecutionPreference_FORCE_CPU
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const ExecutionPreference (&EnumValuesExecutionPreference())[4] {
  static const ExecutionPreference kValues[4] = {
      ExecutionPreference_ANY, ExecutionPreference_LOW_LATENCY,
      ExecutionPreference_LOW_POWER, ExecutionPreference_FORCE_CPU};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesExecutionPreference() {
  static const char * const kNames[5] = {
      "ANY", "LOW_LATENCY", "LOW_POWER", "FORCE_CPU", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameExecutionPreference(ExecutionPreference e) {
  switch (e) {
    case ExecutionPreference_ANY: return "ANY";
    case ExecutionPreference_LOW_LATENCY: return "LOW_LATENCY";
    case ExecutionPreference_LOW_POWER: return "LOW_POWER";
    case ExecutionPreference_FORCE_CPU: return "FORCE_CPU";
    default: return "";
  }
}
// Which TFLite delegate the settings apply to.
enum Delegate {
  Delegate_NONE = 0,
  Delegate_NNAPI = 1,
  Delegate_GPU = 2,
  Delegate_HEXAGON = 3,
  Delegate_XNNPACK = 4,
  Delegate_EDGETPU = 5,
  Delegate_EDGETPU_CORAL = 6,
  Delegate_MIN = Delegate_NONE,
  Delegate_MAX = Delegate_EDGETPU_CORAL
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const Delegate (&EnumValuesDelegate())[7] {
  static const Delegate kValues[7] = {
      Delegate_NONE,    Delegate_NNAPI,   Delegate_GPU,          Delegate_HEXAGON,
      Delegate_XNNPACK, Delegate_EDGETPU, Delegate_EDGETPU_CORAL};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesDelegate() {
  static const char * const kNames[8] = {
      "NONE",    "NNAPI",   "GPU",           "HEXAGON",
      "XNNPACK", "EDGETPU", "EDGETPU_CORAL", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameDelegate(Delegate e) {
  switch (e) {
    case Delegate_NONE: return "NONE";
    case Delegate_NNAPI: return "NNAPI";
    case Delegate_GPU: return "GPU";
    case Delegate_HEXAGON: return "HEXAGON";
    case Delegate_XNNPACK: return "XNNPACK";
    case Delegate_EDGETPU: return "EDGETPU";
    case Delegate_EDGETPU_CORAL: return "EDGETPU_CORAL";
    default: return "";
  }
}
// NNAPI-specific execution preference (mirrors the NNAPI API constants).
enum NNAPIExecutionPreference {
  NNAPIExecutionPreference_UNDEFINED = 0,
  NNAPIExecutionPreference_NNAPI_LOW_POWER = 1,
  NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER = 2,
  NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED = 3,
  NNAPIExecutionPreference_MIN = NNAPIExecutionPreference_UNDEFINED,
  NNAPIExecutionPreference_MAX = NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const NNAPIExecutionPreference (&EnumValuesNNAPIExecutionPreference())[4] {
  static const NNAPIExecutionPreference kValues[4] = {
      NNAPIExecutionPreference_UNDEFINED,
      NNAPIExecutionPreference_NNAPI_LOW_POWER,
      NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
      NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesNNAPIExecutionPreference() {
  static const char * const kNames[5] = {
      "UNDEFINED", "NNAPI_LOW_POWER", "NNAPI_FAST_SINGLE_ANSWER",
      "NNAPI_SUSTAINED_SPEED", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameNNAPIExecutionPreference(NNAPIExecutionPreference e) {
  switch (e) {
    case NNAPIExecutionPreference_UNDEFINED: return "UNDEFINED";
    case NNAPIExecutionPreference_NNAPI_LOW_POWER: return "NNAPI_LOW_POWER";
    case NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER: return "NNAPI_FAST_SINGLE_ANSWER";
    case NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED: return "NNAPI_SUSTAINED_SPEED";
    default: return "";
  }
}
// NNAPI execution priority (mirrors the NNAPI API constants).
enum NNAPIExecutionPriority {
  NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED = 0,
  NNAPIExecutionPriority_NNAPI_PRIORITY_LOW = 1,
  NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM = 2,
  NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH = 3,
  NNAPIExecutionPriority_MIN = NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
  NNAPIExecutionPriority_MAX = NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const NNAPIExecutionPriority (&EnumValuesNNAPIExecutionPriority())[4] {
  static const NNAPIExecutionPriority kValues[4] = {
      NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
      NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
      NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
      NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesNNAPIExecutionPriority() {
  static const char * const kNames[5] = {
      "NNAPI_PRIORITY_UNDEFINED", "NNAPI_PRIORITY_LOW",
      "NNAPI_PRIORITY_MEDIUM", "NNAPI_PRIORITY_HIGH", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameNNAPIExecutionPriority(NNAPIExecutionPriority e) {
  switch (e) {
    case NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED: return "NNAPI_PRIORITY_UNDEFINED";
    case NNAPIExecutionPriority_NNAPI_PRIORITY_LOW: return "NNAPI_PRIORITY_LOW";
    case NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM: return "NNAPI_PRIORITY_MEDIUM";
    case NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH: return "NNAPI_PRIORITY_HIGH";
    default: return "";
  }
}
// Which GPU backend the GPU delegate should be forced to use.
enum GPUBackend {
  GPUBackend_UNSET = 0,
  GPUBackend_OPENCL = 1,
  GPUBackend_OPENGL = 2,
  GPUBackend_MIN = GPUBackend_UNSET,
  GPUBackend_MAX = GPUBackend_OPENGL
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const GPUBackend (&EnumValuesGPUBackend())[3] {
  static const GPUBackend kValues[3] = {GPUBackend_UNSET, GPUBackend_OPENCL,
                                        GPUBackend_OPENGL};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesGPUBackend() {
  static const char * const kNames[4] = {"UNSET", "OPENCL", "OPENGL", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameGPUBackend(GPUBackend e) {
  switch (e) {
    case GPUBackend_UNSET: return "UNSET";
    case GPUBackend_OPENCL: return "OPENCL";
    case GPUBackend_OPENGL: return "OPENGL";
    default: return "";
  }
}
namespace EdgeTpuDeviceSpec_ {

// Platform type of the EdgeTPU device.
enum PlatformType {
  PlatformType_MMIO = 0,
  PlatformType_REFERENCE = 1,
  PlatformType_SIMULATOR = 2,
  PlatformType_REMOTE_SIMULATOR = 3,
  PlatformType_MIN = PlatformType_MMIO,
  PlatformType_MAX = PlatformType_REMOTE_SIMULATOR
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const PlatformType (&EnumValuesPlatformType())[4] {
  static const PlatformType kValues[4] = {
      PlatformType_MMIO, PlatformType_REFERENCE, PlatformType_SIMULATOR,
      PlatformType_REMOTE_SIMULATOR};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesPlatformType() {
  static const char * const kNames[5] = {
      "MMIO", "REFERENCE", "SIMULATOR", "REMOTE_SIMULATOR", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNamePlatformType(PlatformType e) {
  switch (e) {
    case PlatformType_MMIO: return "MMIO";
    case PlatformType_REFERENCE: return "REFERENCE";
    case PlatformType_SIMULATOR: return "SIMULATOR";
    case PlatformType_REMOTE_SIMULATOR: return "REMOTE_SIMULATOR";
    default: return "";
  }
}

}  // namespace EdgeTpuDeviceSpec_
// Power states an EdgeTPU device can be placed in.
enum EdgeTpuPowerState {
  EdgeTpuPowerState_UNDEFINED_POWERSTATE = 0,
  EdgeTpuPowerState_TPU_CORE_OFF = 1,
  EdgeTpuPowerState_READY = 2,
  EdgeTpuPowerState_ACTIVE_MIN_POWER = 3,
  EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER = 4,
  EdgeTpuPowerState_ACTIVE_LOW_POWER = 5,
  EdgeTpuPowerState_ACTIVE = 6,
  EdgeTpuPowerState_OVER_DRIVE = 7,
  EdgeTpuPowerState_MIN = EdgeTpuPowerState_UNDEFINED_POWERSTATE,
  EdgeTpuPowerState_MAX = EdgeTpuPowerState_OVER_DRIVE
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const EdgeTpuPowerState (&EnumValuesEdgeTpuPowerState())[8] {
  static const EdgeTpuPowerState kValues[8] = {
      EdgeTpuPowerState_UNDEFINED_POWERSTATE,
      EdgeTpuPowerState_TPU_CORE_OFF,
      EdgeTpuPowerState_READY,
      EdgeTpuPowerState_ACTIVE_MIN_POWER,
      EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER,
      EdgeTpuPowerState_ACTIVE_LOW_POWER,
      EdgeTpuPowerState_ACTIVE,
      EdgeTpuPowerState_OVER_DRIVE};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesEdgeTpuPowerState() {
  static const char * const kNames[9] = {
      "UNDEFINED_POWERSTATE", "TPU_CORE_OFF",     "READY",
      "ACTIVE_MIN_POWER",     "ACTIVE_VERY_LOW_POWER",
      "ACTIVE_LOW_POWER",     "ACTIVE",           "OVER_DRIVE", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameEdgeTpuPowerState(EdgeTpuPowerState e) {
  switch (e) {
    case EdgeTpuPowerState_UNDEFINED_POWERSTATE: return "UNDEFINED_POWERSTATE";
    case EdgeTpuPowerState_TPU_CORE_OFF: return "TPU_CORE_OFF";
    case EdgeTpuPowerState_READY: return "READY";
    case EdgeTpuPowerState_ACTIVE_MIN_POWER: return "ACTIVE_MIN_POWER";
    case EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER: return "ACTIVE_VERY_LOW_POWER";
    case EdgeTpuPowerState_ACTIVE_LOW_POWER: return "ACTIVE_LOW_POWER";
    case EdgeTpuPowerState_ACTIVE: return "ACTIVE";
    case EdgeTpuPowerState_OVER_DRIVE: return "OVER_DRIVE";
    default: return "";
  }
}
namespace CoralSettings_ {

// Performance tier for a Coral device.
enum Performance {
  Performance_UNDEFINED = 0,
  Performance_MAXIMUM = 1,
  Performance_HIGH = 2,
  Performance_MEDIUM = 3,
  Performance_LOW = 4,
  Performance_MIN = Performance_UNDEFINED,
  Performance_MAX = Performance_LOW
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const Performance (&EnumValuesPerformance())[5] {
  static const Performance kValues[5] = {Performance_UNDEFINED,
                                         Performance_MAXIMUM, Performance_HIGH,
                                         Performance_MEDIUM, Performance_LOW};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesPerformance() {
  static const char * const kNames[6] = {
      "UNDEFINED", "MAXIMUM", "HIGH", "MEDIUM", "LOW", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNamePerformance(Performance e) {
  switch (e) {
    case Performance_UNDEFINED: return "UNDEFINED";
    case Performance_MAXIMUM: return "MAXIMUM";
    case Performance_HIGH: return "HIGH";
    case Performance_MEDIUM: return "MEDIUM";
    case Performance_LOW: return "LOW";
    default: return "";
  }
}

}  // namespace CoralSettings_
// Lifecycle stage a benchmark event reports.
enum BenchmarkEventType {
  BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE = 0,
  BenchmarkEventType_START = 1,
  BenchmarkEventType_END = 2,
  BenchmarkEventType_ERROR = 3,
  BenchmarkEventType_LOGGED = 4,
  BenchmarkEventType_MIN = BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
  BenchmarkEventType_MAX = BenchmarkEventType_LOGGED
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const BenchmarkEventType (&EnumValuesBenchmarkEventType())[5] {
  static const BenchmarkEventType kValues[5] = {
      BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
      BenchmarkEventType_START, BenchmarkEventType_END,
      BenchmarkEventType_ERROR, BenchmarkEventType_LOGGED};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesBenchmarkEventType() {
  static const char * const kNames[6] = {
      "UNDEFINED_BENCHMARK_EVENT_TYPE", "START", "END", "ERROR", "LOGGED",
      nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameBenchmarkEventType(BenchmarkEventType e) {
  switch (e) {
    case BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE: return "UNDEFINED_BENCHMARK_EVENT_TYPE";
    case BenchmarkEventType_START: return "START";
    case BenchmarkEventType_END: return "END";
    case BenchmarkEventType_ERROR: return "ERROR";
    case BenchmarkEventType_LOGGED: return "LOGGED";
    default: return "";
  }
}
// Which phase of a benchmark run an error or metric belongs to.
enum BenchmarkStage {
  BenchmarkStage_UNKNOWN = 0,
  BenchmarkStage_INITIALIZATION = 1,
  BenchmarkStage_INFERENCE = 2,
  BenchmarkStage_MIN = BenchmarkStage_UNKNOWN,
  BenchmarkStage_MAX = BenchmarkStage_INFERENCE
};

// Returns a reference to a static array of the distinct enumerators,
// in declaration order.
inline const BenchmarkStage (&EnumValuesBenchmarkStage())[3] {
  static const BenchmarkStage kValues[3] = {BenchmarkStage_UNKNOWN,
                                            BenchmarkStage_INITIALIZATION,
                                            BenchmarkStage_INFERENCE};
  return kValues;
}

// Returns a nullptr-terminated table of enumerator names, indexed by value.
inline const char * const *EnumNamesBenchmarkStage() {
  static const char * const kNames[4] = {"UNKNOWN", "INITIALIZATION",
                                         "INFERENCE", nullptr};
  return kNames;
}

// Maps a value to its schema name; anything out of range yields "".
inline const char *EnumNameBenchmarkStage(BenchmarkStage e) {
  switch (e) {
    case BenchmarkStage_UNKNOWN: return "UNKNOWN";
    case BenchmarkStage_INITIALIZATION: return "INITIALIZATION";
    case BenchmarkStage_INFERENCE: return "INFERENCE";
    default: return "";
  }
}
// Object-API ("unpacked") form of ComputeSettings: an owning, mutable C++
// mirror used by the generated Pack()/UnPack() round-trip helpers.
struct ComputeSettingsT : public flatbuffers::NativeTable {
  typedef ComputeSettings TableType;
  tflite::ExecutionPreference preference;  // overall power/latency preference
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings;  // owned; may be null
  std::string model_namespace_for_statistics;
  std::string model_identifier_for_statistics;
  // Scalar fields are initialized to their schema defaults; strings and the
  // unique_ptr default-construct empty/null.
  ComputeSettingsT()
      : preference(tflite::ExecutionPreference_ANY) {
  }
};
// Packed ComputeSettings table: zero-copy read accessors over a serialized
// FlatBuffer. Field lookups go through the vtable offsets declared below.
struct ComputeSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ComputeSettingsT NativeTableType;
  // vtable slots; values are fixed by the schema field ids — do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PREFERENCE = 4,
    VT_TFLITE_SETTINGS = 6,
    VT_MODEL_NAMESPACE_FOR_STATISTICS = 8,
    VT_MODEL_IDENTIFIER_FOR_STATISTICS = 10
  };
  // Defaults to ANY (0) when the field is absent from the buffer.
  tflite::ExecutionPreference preference() const {
    return static_cast<tflite::ExecutionPreference>(GetField<int32_t>(VT_PREFERENCE, 0));
  }
  // Pointer accessors return nullptr when the field is absent.
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  const flatbuffers::String *model_namespace_for_statistics() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_NAMESPACE_FOR_STATISTICS);
  }
  const flatbuffers::String *model_identifier_for_statistics() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_IDENTIFIER_FOR_STATISTICS);
  }
  // Structural validation against buffer bounds; checks each field in
  // declaration order and short-circuits on the first failure.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PREFERENCE) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyOffset(verifier, VT_MODEL_NAMESPACE_FOR_STATISTICS) &&
           verifier.VerifyString(model_namespace_for_statistics()) &&
           VerifyOffset(verifier, VT_MODEL_IDENTIFIER_FOR_STATISTICS) &&
           verifier.VerifyString(model_identifier_for_statistics()) &&
           verifier.EndTable();
  }
  // Object-API conversion helpers (definitions generated elsewhere).
  ComputeSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<ComputeSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Imperative builder for a ComputeSettings table: call add_* for each field
// to be written, then Finish() exactly once to close the table.
struct ComputeSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // offset of StartTable() for this table
  void add_preference(tflite::ExecutionPreference preference) {
    fbb_.AddElement<int32_t>(ComputeSettings::VT_PREFERENCE, static_cast<int32_t>(preference), 0);
  }
  void add_tflite_settings(flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(ComputeSettings::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_model_namespace_for_statistics(flatbuffers::Offset<flatbuffers::String> model_namespace_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_NAMESPACE_FOR_STATISTICS, model_namespace_for_statistics);
  }
  void add_model_identifier_for_statistics(flatbuffers::Offset<flatbuffers::String> model_identifier_for_statistics) {
    fbb_.AddOffset(ComputeSettings::VT_MODEL_IDENTIFIER_FOR_STATISTICS, model_identifier_for_statistics);
  }
  explicit ComputeSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ComputeSettingsBuilder &operator=(const ComputeSettingsBuilder &);
  // Ends the table and returns its offset within the buffer being built.
  flatbuffers::Offset<ComputeSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ComputeSettings>(end);
    return o;
  }
};
// Convenience factory building a complete ComputeSettings table in one call.
// NOTE: the add_* calls below are emitted by the generator in a specific
// (size-sorted) order that determines the serialized layout — do not reorder.
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    flatbuffers::Offset<flatbuffers::String> model_namespace_for_statistics = 0,
    flatbuffers::Offset<flatbuffers::String> model_identifier_for_statistics = 0) {
  ComputeSettingsBuilder builder_(_fbb);
  builder_.add_model_identifier_for_statistics(model_identifier_for_statistics);
  builder_.add_model_namespace_for_statistics(model_namespace_for_statistics);
  builder_.add_tflite_settings(tflite_settings);
  builder_.add_preference(preference);
  return builder_.Finish();
}
// Like CreateComputeSettings, but accepts raw C strings: each non-null
// string is serialized first, then the offset-based factory is called.
// String creation order (namespace before identifier) is preserved since it
// determines where the strings land in the buffer.
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::ExecutionPreference preference = tflite::ExecutionPreference_ANY,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    const char *model_namespace_for_statistics = nullptr,
    const char *model_identifier_for_statistics = nullptr) {
  flatbuffers::Offset<flatbuffers::String> ns_offset = 0;
  if (model_namespace_for_statistics) {
    ns_offset = _fbb.CreateString(model_namespace_for_statistics);
  }
  flatbuffers::Offset<flatbuffers::String> id_offset = 0;
  if (model_identifier_for_statistics) {
    id_offset = _fbb.CreateString(model_identifier_for_statistics);
  }
  return tflite::CreateComputeSettings(_fbb, preference, tflite_settings,
                                       ns_offset, id_offset);
}
flatbuffers::Offset<ComputeSettings> CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API ("unpacked") form of NNAPISettings: an owning, mutable C++
// mirror used by the generated Pack()/UnPack() round-trip helpers.
struct NNAPISettingsT : public flatbuffers::NativeTable {
  typedef NNAPISettings TableType;
  std::string accelerator_name;
  std::string cache_directory;
  std::string model_token;
  tflite::NNAPIExecutionPreference execution_preference;
  int32_t no_of_nnapi_instances_to_cache;
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings;  // owned; may be null
  bool allow_nnapi_cpu_on_android_10_plus;
  tflite::NNAPIExecutionPriority execution_priority;
  bool allow_dynamic_dimensions;
  bool allow_fp16_precision_for_fp32;
  // Scalars are initialized to their schema defaults (enums UNDEFINED,
  // ints 0, bools false); strings/pointer default-construct empty/null.
  NNAPISettingsT()
      : execution_preference(tflite::NNAPIExecutionPreference_UNDEFINED),
        no_of_nnapi_instances_to_cache(0),
        allow_nnapi_cpu_on_android_10_plus(false),
        execution_priority(tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED),
        allow_dynamic_dimensions(false),
        allow_fp16_precision_for_fp32(false) {
  }
};
// Packed NNAPISettings table: zero-copy read accessors over a serialized
// FlatBuffer. Field lookups go through the vtable offsets declared below.
struct NNAPISettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef NNAPISettingsT NativeTableType;
  // vtable slots; values are fixed by the schema field ids — do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ACCELERATOR_NAME = 4,
    VT_CACHE_DIRECTORY = 6,
    VT_MODEL_TOKEN = 8,
    VT_EXECUTION_PREFERENCE = 10,
    VT_NO_OF_NNAPI_INSTANCES_TO_CACHE = 12,
    VT_FALLBACK_SETTINGS = 14,
    VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS = 16,
    VT_EXECUTION_PRIORITY = 18,
    VT_ALLOW_DYNAMIC_DIMENSIONS = 20,
    VT_ALLOW_FP16_PRECISION_FOR_FP32 = 22
  };
  // Pointer accessors return nullptr when the field is absent.
  const flatbuffers::String *accelerator_name() const {
    return GetPointer<const flatbuffers::String *>(VT_ACCELERATOR_NAME);
  }
  const flatbuffers::String *cache_directory() const {
    return GetPointer<const flatbuffers::String *>(VT_CACHE_DIRECTORY);
  }
  const flatbuffers::String *model_token() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  // Scalar accessors fall back to the schema default (0/false) when absent.
  tflite::NNAPIExecutionPreference execution_preference() const {
    return static_cast<tflite::NNAPIExecutionPreference>(GetField<int32_t>(VT_EXECUTION_PREFERENCE, 0));
  }
  int32_t no_of_nnapi_instances_to_cache() const {
    return GetField<int32_t>(VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, 0);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  bool allow_nnapi_cpu_on_android_10_plus() const {
    return GetField<uint8_t>(VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, 0) != 0;
  }
  tflite::NNAPIExecutionPriority execution_priority() const {
    return static_cast<tflite::NNAPIExecutionPriority>(GetField<int32_t>(VT_EXECUTION_PRIORITY, 0));
  }
  bool allow_dynamic_dimensions() const {
    return GetField<uint8_t>(VT_ALLOW_DYNAMIC_DIMENSIONS, 0) != 0;
  }
  bool allow_fp16_precision_for_fp32() const {
    return GetField<uint8_t>(VT_ALLOW_FP16_PRECISION_FOR_FP32, 0) != 0;
  }
  // Structural validation against buffer bounds; checks each field in
  // declaration order and short-circuits on the first failure.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_ACCELERATOR_NAME) &&
           verifier.VerifyString(accelerator_name()) &&
           VerifyOffset(verifier, VT_CACHE_DIRECTORY) &&
           verifier.VerifyString(cache_directory()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PREFERENCE) &&
           VerifyField<int32_t>(verifier, VT_NO_OF_NNAPI_INSTANCES_TO_CACHE) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS) &&
           VerifyField<int32_t>(verifier, VT_EXECUTION_PRIORITY) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_DYNAMIC_DIMENSIONS) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_FP16_PRECISION_FOR_FP32) &&
           verifier.EndTable();
  }
  // Object-API conversion helpers (definitions generated elsewhere).
  NNAPISettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<NNAPISettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Imperative builder for an NNAPISettings table: call add_* for each field
// to be written, then Finish() exactly once to close the table.
struct NNAPISettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // offset of StartTable() for this table
  void add_accelerator_name(flatbuffers::Offset<flatbuffers::String> accelerator_name) {
    fbb_.AddOffset(NNAPISettings::VT_ACCELERATOR_NAME, accelerator_name);
  }
  void add_cache_directory(flatbuffers::Offset<flatbuffers::String> cache_directory) {
    fbb_.AddOffset(NNAPISettings::VT_CACHE_DIRECTORY, cache_directory);
  }
  void add_model_token(flatbuffers::Offset<flatbuffers::String> model_token) {
    fbb_.AddOffset(NNAPISettings::VT_MODEL_TOKEN, model_token);
  }
  void add_execution_preference(tflite::NNAPIExecutionPreference execution_preference) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PREFERENCE, static_cast<int32_t>(execution_preference), 0);
  }
  void add_no_of_nnapi_instances_to_cache(int32_t no_of_nnapi_instances_to_cache) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_NO_OF_NNAPI_INSTANCES_TO_CACHE, no_of_nnapi_instances_to_cache, 0);
  }
  void add_fallback_settings(flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(NNAPISettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  void add_allow_nnapi_cpu_on_android_10_plus(bool allow_nnapi_cpu_on_android_10_plus) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_NNAPI_CPU_ON_ANDROID_10_PLUS, static_cast<uint8_t>(allow_nnapi_cpu_on_android_10_plus), 0);
  }
  void add_execution_priority(tflite::NNAPIExecutionPriority execution_priority) {
    fbb_.AddElement<int32_t>(NNAPISettings::VT_EXECUTION_PRIORITY, static_cast<int32_t>(execution_priority), 0);
  }
  void add_allow_dynamic_dimensions(bool allow_dynamic_dimensions) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_DYNAMIC_DIMENSIONS, static_cast<uint8_t>(allow_dynamic_dimensions), 0);
  }
  void add_allow_fp16_precision_for_fp32(bool allow_fp16_precision_for_fp32) {
    fbb_.AddElement<uint8_t>(NNAPISettings::VT_ALLOW_FP16_PRECISION_FOR_FP32, static_cast<uint8_t>(allow_fp16_precision_for_fp32), 0);
  }
  explicit NNAPISettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  NNAPISettingsBuilder &operator=(const NNAPISettingsBuilder &);
  // Ends the table and returns its offset within the buffer being built.
  flatbuffers::Offset<NNAPISettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<NNAPISettings>(end);
    return o;
  }
};
// Convenience factory building a complete NNAPISettings table in one call.
// NOTE: the add_* calls below are emitted by the generator in a specific
// (size-sorted) order that determines the serialized layout — do not reorder.
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> accelerator_name = 0,
    flatbuffers::Offset<flatbuffers::String> cache_directory = 0,
    flatbuffers::Offset<flatbuffers::String> model_token = 0,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false) {
  NNAPISettingsBuilder builder_(_fbb);
  builder_.add_execution_priority(execution_priority);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_no_of_nnapi_instances_to_cache(no_of_nnapi_instances_to_cache);
  builder_.add_execution_preference(execution_preference);
  builder_.add_model_token(model_token);
  builder_.add_cache_directory(cache_directory);
  builder_.add_accelerator_name(accelerator_name);
  builder_.add_allow_fp16_precision_for_fp32(allow_fp16_precision_for_fp32);
  builder_.add_allow_dynamic_dimensions(allow_dynamic_dimensions);
  builder_.add_allow_nnapi_cpu_on_android_10_plus(allow_nnapi_cpu_on_android_10_plus);
  return builder_.Finish();
}
// Like CreateNNAPISettings, but accepts raw C strings: each non-null string
// is serialized first (creation order preserved: accelerator name, cache
// directory, model token — it determines buffer placement), then the
// offset-based factory is called.
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *accelerator_name = nullptr,
    const char *cache_directory = nullptr,
    const char *model_token = nullptr,
    tflite::NNAPIExecutionPreference execution_preference = tflite::NNAPIExecutionPreference_UNDEFINED,
    int32_t no_of_nnapi_instances_to_cache = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0,
    bool allow_nnapi_cpu_on_android_10_plus = false,
    tflite::NNAPIExecutionPriority execution_priority = tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
    bool allow_dynamic_dimensions = false,
    bool allow_fp16_precision_for_fp32 = false) {
  flatbuffers::Offset<flatbuffers::String> name_offset =
      accelerator_name ? _fbb.CreateString(accelerator_name) : 0;
  flatbuffers::Offset<flatbuffers::String> dir_offset =
      cache_directory ? _fbb.CreateString(cache_directory) : 0;
  flatbuffers::Offset<flatbuffers::String> token_offset =
      model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateNNAPISettings(
      _fbb, name_offset, dir_offset, token_offset, execution_preference,
      no_of_nnapi_instances_to_cache, fallback_settings,
      allow_nnapi_cpu_on_android_10_plus, execution_priority,
      allow_dynamic_dimensions, allow_fp16_precision_for_fp32);
}
flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API ("unpacked") form of GPUSettings: an owning, mutable C++
// mirror used by the generated Pack()/UnPack() round-trip helpers.
struct GPUSettingsT : public flatbuffers::NativeTable {
  typedef GPUSettings TableType;
  bool is_precision_loss_allowed;
  bool enable_quantized_inference;  // schema default is true
  tflite::GPUBackend force_backend;
  // Scalars are initialized to their schema defaults.
  GPUSettingsT()
      : is_precision_loss_allowed(false),
        enable_quantized_inference(true),
        force_backend(tflite::GPUBackend_UNSET) {
  }
};
// Packed GPUSettings table: zero-copy read accessors over a serialized
// FlatBuffer. Field lookups go through the vtable offsets declared below.
struct GPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef GPUSettingsT NativeTableType;
  // vtable slots; values are fixed by the schema field ids — do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_IS_PRECISION_LOSS_ALLOWED = 4,
    VT_ENABLE_QUANTIZED_INFERENCE = 6,
    VT_FORCE_BACKEND = 8
  };
  bool is_precision_loss_allowed() const {
    return GetField<uint8_t>(VT_IS_PRECISION_LOSS_ALLOWED, 0) != 0;
  }
  // Note the default of 1: this field is true when absent from the buffer.
  bool enable_quantized_inference() const {
    return GetField<uint8_t>(VT_ENABLE_QUANTIZED_INFERENCE, 1) != 0;
  }
  tflite::GPUBackend force_backend() const {
    return static_cast<tflite::GPUBackend>(GetField<int32_t>(VT_FORCE_BACKEND, 0));
  }
  // Structural validation against buffer bounds; short-circuits on failure.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_IS_PRECISION_LOSS_ALLOWED) &&
           VerifyField<uint8_t>(verifier, VT_ENABLE_QUANTIZED_INFERENCE) &&
           VerifyField<int32_t>(verifier, VT_FORCE_BACKEND) &&
           verifier.EndTable();
  }
  // Object-API conversion helpers (definitions generated elsewhere).
  GPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<GPUSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Imperative builder for a GPUSettings table: call add_* for each field to
// be written, then Finish() exactly once to close the table.
struct GPUSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;  // offset of StartTable() for this table
  void add_is_precision_loss_allowed(bool is_precision_loss_allowed) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_IS_PRECISION_LOSS_ALLOWED, static_cast<uint8_t>(is_precision_loss_allowed), 0);
  }
  // Default is 1 (true): AddElement skips writing when the value equals it.
  void add_enable_quantized_inference(bool enable_quantized_inference) {
    fbb_.AddElement<uint8_t>(GPUSettings::VT_ENABLE_QUANTIZED_INFERENCE, static_cast<uint8_t>(enable_quantized_inference), 1);
  }
  void add_force_backend(tflite::GPUBackend force_backend) {
    fbb_.AddElement<int32_t>(GPUSettings::VT_FORCE_BACKEND, static_cast<int32_t>(force_backend), 0);
  }
  explicit GPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
      : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  GPUSettingsBuilder &operator=(const GPUSettingsBuilder &);
  // Ends the table and returns its offset within the buffer being built.
  flatbuffers::Offset<GPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<GPUSettings>(end);
    return o;
  }
};
// Convenience factory building a complete GPUSettings table in one call.
// NOTE: the add_* calls below are emitted by the generator in a specific
// (size-sorted) order that determines the serialized layout — do not reorder.
inline flatbuffers::Offset<GPUSettings> CreateGPUSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool is_precision_loss_allowed = false,
    bool enable_quantized_inference = true,
    tflite::GPUBackend force_backend = tflite::GPUBackend_UNSET) {
  GPUSettingsBuilder builder_(_fbb);
  builder_.add_force_backend(force_backend);
  builder_.add_enable_quantized_inference(enable_quantized_inference);
  builder_.add_is_precision_loss_allowed(is_precision_loss_allowed);
  return builder_.Finish();
}
flatbuffers::Offset<GPUSettings> CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Object-API ("unpacked") form of HexagonSettings: an owning, mutable C++
// mirror used by the generated Pack()/UnPack() round-trip helpers.
struct HexagonSettingsT : public flatbuffers::NativeTable {
  typedef HexagonSettings TableType;
  int32_t debug_level;
  int32_t powersave_level;
  bool print_graph_profile;
  bool print_graph_debug;
  // Scalars are initialized to their schema defaults (all zero/false).
  HexagonSettingsT()
      : debug_level(0),
        powersave_level(0),
        print_graph_profile(false),
        print_graph_debug(false) {
  }
};
// Packed HexagonSettings table: zero-copy read accessors over a serialized
// FlatBuffer. Field lookups go through the vtable offsets declared below.
struct HexagonSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef HexagonSettingsT NativeTableType;
  // vtable slots; values are fixed by the schema field ids — do not reorder.
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEBUG_LEVEL = 4,
    VT_POWERSAVE_LEVEL = 6,
    VT_PRINT_GRAPH_PROFILE = 8,
    VT_PRINT_GRAPH_DEBUG = 10
  };
  // Scalar accessors fall back to the schema default (0/false) when absent.
  int32_t debug_level() const {
    return GetField<int32_t>(VT_DEBUG_LEVEL, 0);
  }
  int32_t powersave_level() const {
    return GetField<int32_t>(VT_POWERSAVE_LEVEL, 0);
  }
  bool print_graph_profile() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_PROFILE, 0) != 0;
  }
  bool print_graph_debug() const {
    return GetField<uint8_t>(VT_PRINT_GRAPH_DEBUG, 0) != 0;
  }
  // Structural validation against buffer bounds; short-circuits on failure.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DEBUG_LEVEL) &&
           VerifyField<int32_t>(verifier, VT_POWERSAVE_LEVEL) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_PROFILE) &&
           VerifyField<uint8_t>(verifier, VT_PRINT_GRAPH_DEBUG) &&
           verifier.EndTable();
  }
  // Object-API conversion helpers (definitions generated elsewhere).
  HexagonSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<HexagonSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
struct HexagonSettingsBuilder {
flatbuffers::FlatBufferBuilder &fbb_;
flatbuffers::uoffset_t start_;
void add_debug_level(int32_t debug_level) {
fbb_.AddElement<int32_t>(HexagonSettings::VT_DEBUG_LEVEL, debug_level, 0);
}
void add_powersave_level(int32_t powersave_level) {
fbb_.AddElement<int32_t>(HexagonSettings::VT_POWERSAVE_LEVEL, powersave_level, 0);
}
void add_print_graph_profile(bool print_graph_profile) {
fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_PROFILE, static_cast<uint8_t>(print_graph_profile), 0);
}
void add_print_graph_debug(bool print_graph_debug) {
fbb_.AddElement<uint8_t>(HexagonSettings::VT_PRINT_GRAPH_DEBUG, static_cast<uint8_t>(print_graph_debug), 0);
}
explicit HexagonSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
: fbb_(_fbb) {
start_ = fbb_.StartTable();
}
HexagonSettingsBuilder &operator=(const HexagonSettingsBuilder &);
flatbuffers::Offset<HexagonSettings> Finish() {
const auto end = fbb_.EndTable(start_);
auto o = flatbuffers::Offset<HexagonSettings>(end);
return o;
}
};
inline flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(
flatbuffers::FlatBufferBuilder &_fbb,
int32_t debug_level = 0,
int32_t powersave_level = 0,
bool print_graph_profile = false,
bool print_graph_debug = false) {
HexagonSettingsBuilder builder_(_fbb);
builder_.add_powersave_level(powersave_level);
builder_.add_debug_level(debug_level);
builder_.add_print_graph_debug(print_graph_debug);
builder_.add_print_graph_profile(print_graph_profile);
return builder_.Finish();
}
flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the XNNPackSettings table.
struct XNNPackSettingsT : public flatbuffers::NativeTable {
  typedef XNNPackSettings TableType;
  int32_t num_threads;
  XNNPackSettingsT()
      : num_threads(0) {
  }
};
// Flat (in-buffer) XNNPackSettings table: single num_threads field.
struct XNNPackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef XNNPackSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  XNNPackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<XNNPackSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for XNNPackSettings.
struct XNNPackSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(XNNPackSettings::VT_NUM_THREADS, num_threads, 0);
  }
  explicit XNNPackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  XNNPackSettingsBuilder &operator=(const XNNPackSettingsBuilder &);
  flatbuffers::Offset<XNNPackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<XNNPackSettings>(end);
    return o;
  }
};
// Convenience helper: builds an XNNPackSettings table from field values.
inline flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t num_threads = 0) {
  XNNPackSettingsBuilder builder_(_fbb);
  builder_.add_num_threads(num_threads);
  return builder_.Finish();
}
// Object-API overload: packs a native XNNPackSettingsT.
flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the EdgeTpuDeviceSpec table. device_paths
// is owned by value here (vector of std::string), unlike the flat table's
// zero-copy string vector.
struct EdgeTpuDeviceSpecT : public flatbuffers::NativeTable {
  typedef EdgeTpuDeviceSpec TableType;
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type;
  int32_t num_chips;
  std::vector<std::string> device_paths;
  int32_t chip_family;
  EdgeTpuDeviceSpecT()
      : platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO),
        num_chips(0),
        chip_family(0) {
  }
};
// Flat (in-buffer) EdgeTpuDeviceSpec table.
struct EdgeTpuDeviceSpec FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuDeviceSpecT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_PLATFORM_TYPE = 4,
    VT_NUM_CHIPS = 6,
    VT_DEVICE_PATHS = 8,
    VT_CHIP_FAMILY = 10
  };
  // Enum is stored as its int32_t underlying value in the buffer.
  tflite::EdgeTpuDeviceSpec_::PlatformType platform_type() const {
    return static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(GetField<int32_t>(VT_PLATFORM_TYPE, 0));
  }
  int32_t num_chips() const {
    return GetField<int32_t>(VT_NUM_CHIPS, 0);
  }
  // Returns nullptr when the field is absent from the buffer.
  const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *device_paths() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>> *>(VT_DEVICE_PATHS);
  }
  int32_t chip_family() const {
    return GetField<int32_t>(VT_CHIP_FAMILY, 0);
  }
  // Structural validation, including every string in device_paths.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_PLATFORM_TYPE) &&
           VerifyField<int32_t>(verifier, VT_NUM_CHIPS) &&
           VerifyOffset(verifier, VT_DEVICE_PATHS) &&
           verifier.VerifyVector(device_paths()) &&
           verifier.VerifyVectorOfStrings(device_paths()) &&
           VerifyField<int32_t>(verifier, VT_CHIP_FAMILY) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  EdgeTpuDeviceSpecT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuDeviceSpec> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for EdgeTpuDeviceSpec.
struct EdgeTpuDeviceSpecBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_platform_type(tflite::EdgeTpuDeviceSpec_::PlatformType platform_type) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_PLATFORM_TYPE, static_cast<int32_t>(platform_type), 0);
  }
  void add_num_chips(int32_t num_chips) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_NUM_CHIPS, num_chips, 0);
  }
  void add_device_paths(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> device_paths) {
    fbb_.AddOffset(EdgeTpuDeviceSpec::VT_DEVICE_PATHS, device_paths);
  }
  void add_chip_family(int32_t chip_family) {
    fbb_.AddElement<int32_t>(EdgeTpuDeviceSpec::VT_CHIP_FAMILY, chip_family, 0);
  }
  explicit EdgeTpuDeviceSpecBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  EdgeTpuDeviceSpecBuilder &operator=(const EdgeTpuDeviceSpecBuilder &);
  flatbuffers::Offset<EdgeTpuDeviceSpec> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuDeviceSpec>(end);
    return o;
  }
};
// Convenience helper: builds an EdgeTpuDeviceSpec from field values; the
// device_paths vector must already be serialized into the same builder.
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<flatbuffers::String>>> device_paths = 0,
    int32_t chip_family = 0) {
  EdgeTpuDeviceSpecBuilder builder_(_fbb);
  builder_.add_chip_family(chip_family);
  builder_.add_device_paths(device_paths);
  builder_.add_num_chips(num_chips);
  builder_.add_platform_type(platform_type);
  return builder_.Finish();
}
// "Direct" variant: serializes the device_paths vector into the builder
// first, then delegates to CreateEdgeTpuDeviceSpec.
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpecDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuDeviceSpec_::PlatformType platform_type = tflite::EdgeTpuDeviceSpec_::PlatformType_MMIO,
    int32_t num_chips = 0,
    const std::vector<flatbuffers::Offset<flatbuffers::String>> *device_paths = nullptr,
    int32_t chip_family = 0) {
  auto device_paths__ = device_paths ? _fbb.CreateVector<flatbuffers::Offset<flatbuffers::String>>(*device_paths) : 0;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      platform_type,
      num_chips,
      device_paths__,
      chip_family);
}
// Object-API overload: packs a native EdgeTpuDeviceSpecT.
flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfigT : public flatbuffers::NativeTable {
  typedef EdgeTpuInactivePowerConfig TableType;
  tflite::EdgeTpuPowerState inactive_power_state;
  int64_t inactive_timeout_us;
  EdgeTpuInactivePowerConfigT()
      : inactive_power_state(tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE),
        inactive_timeout_us(0) {
  }
};
// Flat (in-buffer) EdgeTpuInactivePowerConfig table.
struct EdgeTpuInactivePowerConfig FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuInactivePowerConfigT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INACTIVE_POWER_STATE = 4,
    VT_INACTIVE_TIMEOUT_US = 6
  };
  // Enum is stored as its int32_t underlying value in the buffer.
  tflite::EdgeTpuPowerState inactive_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INACTIVE_POWER_STATE, 0));
  }
  int64_t inactive_timeout_us() const {
    return GetField<int64_t>(VT_INACTIVE_TIMEOUT_US, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INACTIVE_POWER_STATE) &&
           VerifyField<int64_t>(verifier, VT_INACTIVE_TIMEOUT_US) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  EdgeTpuInactivePowerConfigT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuInactivePowerConfig> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for EdgeTpuInactivePowerConfig.
struct EdgeTpuInactivePowerConfigBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_inactive_power_state(tflite::EdgeTpuPowerState inactive_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_POWER_STATE, static_cast<int32_t>(inactive_power_state), 0);
  }
  void add_inactive_timeout_us(int64_t inactive_timeout_us) {
    fbb_.AddElement<int64_t>(EdgeTpuInactivePowerConfig::VT_INACTIVE_TIMEOUT_US, inactive_timeout_us, 0);
  }
  explicit EdgeTpuInactivePowerConfigBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  EdgeTpuInactivePowerConfigBuilder &operator=(const EdgeTpuInactivePowerConfigBuilder &);
  flatbuffers::Offset<EdgeTpuInactivePowerConfig> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuInactivePowerConfig>(end);
    return o;
  }
};
// Convenience helper: builds an EdgeTpuInactivePowerConfig from field values.
// The 8-byte timeout field is added before the 4-byte enum (alignment order).
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inactive_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    int64_t inactive_timeout_us = 0) {
  EdgeTpuInactivePowerConfigBuilder builder_(_fbb);
  builder_.add_inactive_timeout_us(inactive_timeout_us);
  builder_.add_inactive_power_state(inactive_power_state);
  return builder_.Finish();
}
// Object-API overload: packs a native EdgeTpuInactivePowerConfigT.
flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the EdgeTpuSettings table. Sub-tables are
// owned via unique_ptr; inference_priority defaults to -1 (schema default).
struct EdgeTpuSettingsT : public flatbuffers::NativeTable {
  typedef EdgeTpuSettings TableType;
  tflite::EdgeTpuPowerState inference_power_state;
  std::vector<std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>> inactive_power_configs;
  int32_t inference_priority;
  std::unique_ptr<tflite::EdgeTpuDeviceSpecT> edgetpu_device_spec;
  std::string model_token;
  EdgeTpuSettingsT()
      : inference_power_state(tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE),
        inference_priority(-1) {
  }
};
// Flat (in-buffer) EdgeTpuSettings table.
struct EdgeTpuSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef EdgeTpuSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INFERENCE_POWER_STATE = 4,
    VT_INACTIVE_POWER_CONFIGS = 6,
    VT_INFERENCE_PRIORITY = 8,
    VT_EDGETPU_DEVICE_SPEC = 10,
    VT_MODEL_TOKEN = 12
  };
  tflite::EdgeTpuPowerState inference_power_state() const {
    return static_cast<tflite::EdgeTpuPowerState>(GetField<int32_t>(VT_INFERENCE_POWER_STATE, 0));
  }
  // Returns nullptr when the field is absent from the buffer.
  const flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *>(VT_INACTIVE_POWER_CONFIGS);
  }
  // Note the -1 default when the field was not written.
  int32_t inference_priority() const {
    return GetField<int32_t>(VT_INFERENCE_PRIORITY, -1);
  }
  const tflite::EdgeTpuDeviceSpec *edgetpu_device_spec() const {
    return GetPointer<const tflite::EdgeTpuDeviceSpec *>(VT_EDGETPU_DEVICE_SPEC);
  }
  const flatbuffers::String *model_token() const {
    return GetPointer<const flatbuffers::String *>(VT_MODEL_TOKEN);
  }
  // Structural validation, recursing into nested tables and the vector.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_POWER_STATE) &&
           VerifyOffset(verifier, VT_INACTIVE_POWER_CONFIGS) &&
           verifier.VerifyVector(inactive_power_configs()) &&
           verifier.VerifyVectorOfTables(inactive_power_configs()) &&
           VerifyField<int32_t>(verifier, VT_INFERENCE_PRIORITY) &&
           VerifyOffset(verifier, VT_EDGETPU_DEVICE_SPEC) &&
           verifier.VerifyTable(edgetpu_device_spec()) &&
           VerifyOffset(verifier, VT_MODEL_TOKEN) &&
           verifier.VerifyString(model_token()) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  EdgeTpuSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<EdgeTpuSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for EdgeTpuSettings.
struct EdgeTpuSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_inference_power_state(tflite::EdgeTpuPowerState inference_power_state) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_POWER_STATE, static_cast<int32_t>(inference_power_state), 0);
  }
  void add_inactive_power_configs(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs) {
    fbb_.AddOffset(EdgeTpuSettings::VT_INACTIVE_POWER_CONFIGS, inactive_power_configs);
  }
  void add_inference_priority(int32_t inference_priority) {
    fbb_.AddElement<int32_t>(EdgeTpuSettings::VT_INFERENCE_PRIORITY, inference_priority, -1);
  }
  void add_edgetpu_device_spec(flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec) {
    fbb_.AddOffset(EdgeTpuSettings::VT_EDGETPU_DEVICE_SPEC, edgetpu_device_spec);
  }
  void add_model_token(flatbuffers::Offset<flatbuffers::String> model_token) {
    fbb_.AddOffset(EdgeTpuSettings::VT_MODEL_TOKEN, model_token);
  }
  explicit EdgeTpuSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  EdgeTpuSettingsBuilder &operator=(const EdgeTpuSettingsBuilder &);
  flatbuffers::Offset<EdgeTpuSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<EdgeTpuSettings>(end);
    return o;
  }
};
// Convenience helper: builds an EdgeTpuSettings table; offset-typed fields
// must already be serialized into the same builder.
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>> inactive_power_configs = 0,
    int32_t inference_priority = -1,
    flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0,
    flatbuffers::Offset<flatbuffers::String> model_token = 0) {
  EdgeTpuSettingsBuilder builder_(_fbb);
  builder_.add_model_token(model_token);
  builder_.add_edgetpu_device_spec(edgetpu_device_spec);
  builder_.add_inference_priority(inference_priority);
  builder_.add_inactive_power_configs(inactive_power_configs);
  builder_.add_inference_power_state(inference_power_state);
  return builder_.Finish();
}
// "Direct" variant: serializes the vector/string arguments first, then
// delegates to CreateEdgeTpuSettings.
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::EdgeTpuPowerState inference_power_state = tflite::EdgeTpuPowerState_UNDEFINED_POWERSTATE,
    const std::vector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> *inactive_power_configs = nullptr,
    int32_t inference_priority = -1,
    flatbuffers::Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0,
    const char *model_token = nullptr) {
  auto inactive_power_configs__ = inactive_power_configs ? _fbb.CreateVector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>>(*inactive_power_configs) : 0;
  auto model_token__ = model_token ? _fbb.CreateString(model_token) : 0;
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      inference_power_state,
      inactive_power_configs__,
      inference_priority,
      edgetpu_device_spec,
      model_token__);
}
// Object-API overload: packs a native EdgeTpuSettingsT.
flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the CoralSettings table.
struct CoralSettingsT : public flatbuffers::NativeTable {
  typedef CoralSettings TableType;
  std::string device;
  tflite::CoralSettings_::Performance performance;
  bool usb_always_dfu;
  int32_t usb_max_bulk_in_queue_length;
  CoralSettingsT()
      : performance(tflite::CoralSettings_::Performance_UNDEFINED),
        usb_always_dfu(false),
        usb_max_bulk_in_queue_length(0) {
  }
};
// Flat (in-buffer) CoralSettings table.
struct CoralSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CoralSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DEVICE = 4,
    VT_PERFORMANCE = 6,
    VT_USB_ALWAYS_DFU = 8,
    VT_USB_MAX_BULK_IN_QUEUE_LENGTH = 10
  };
  // Returns nullptr when the field is absent from the buffer.
  const flatbuffers::String *device() const {
    return GetPointer<const flatbuffers::String *>(VT_DEVICE);
  }
  // Enum is stored as its int32_t underlying value in the buffer.
  tflite::CoralSettings_::Performance performance() const {
    return static_cast<tflite::CoralSettings_::Performance>(GetField<int32_t>(VT_PERFORMANCE, 0));
  }
  bool usb_always_dfu() const {
    return GetField<uint8_t>(VT_USB_ALWAYS_DFU, 0) != 0;
  }
  int32_t usb_max_bulk_in_queue_length() const {
    return GetField<int32_t>(VT_USB_MAX_BULK_IN_QUEUE_LENGTH, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_DEVICE) &&
           verifier.VerifyString(device()) &&
           VerifyField<int32_t>(verifier, VT_PERFORMANCE) &&
           VerifyField<uint8_t>(verifier, VT_USB_ALWAYS_DFU) &&
           VerifyField<int32_t>(verifier, VT_USB_MAX_BULK_IN_QUEUE_LENGTH) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  CoralSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CoralSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for CoralSettings.
struct CoralSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_device(flatbuffers::Offset<flatbuffers::String> device) {
    fbb_.AddOffset(CoralSettings::VT_DEVICE, device);
  }
  void add_performance(tflite::CoralSettings_::Performance performance) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_PERFORMANCE, static_cast<int32_t>(performance), 0);
  }
  void add_usb_always_dfu(bool usb_always_dfu) {
    fbb_.AddElement<uint8_t>(CoralSettings::VT_USB_ALWAYS_DFU, static_cast<uint8_t>(usb_always_dfu), 0);
  }
  void add_usb_max_bulk_in_queue_length(int32_t usb_max_bulk_in_queue_length) {
    fbb_.AddElement<int32_t>(CoralSettings::VT_USB_MAX_BULK_IN_QUEUE_LENGTH, usb_max_bulk_in_queue_length, 0);
  }
  explicit CoralSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  CoralSettingsBuilder &operator=(const CoralSettingsBuilder &);
  flatbuffers::Offset<CoralSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CoralSettings>(end);
    return o;
  }
};
// Convenience helper: builds a CoralSettings table from field values; the
// device string must already be serialized into the same builder.
inline flatbuffers::Offset<CoralSettings> CreateCoralSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> device = 0,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  CoralSettingsBuilder builder_(_fbb);
  builder_.add_usb_max_bulk_in_queue_length(usb_max_bulk_in_queue_length);
  builder_.add_performance(performance);
  builder_.add_device(device);
  builder_.add_usb_always_dfu(usb_always_dfu);
  return builder_.Finish();
}
// "Direct" variant: serializes the device string first, then delegates.
inline flatbuffers::Offset<CoralSettings> CreateCoralSettingsDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *device = nullptr,
    tflite::CoralSettings_::Performance performance = tflite::CoralSettings_::Performance_UNDEFINED,
    bool usb_always_dfu = false,
    int32_t usb_max_bulk_in_queue_length = 0) {
  auto device__ = device ? _fbb.CreateString(device) : 0;
  return tflite::CreateCoralSettings(
      _fbb,
      device__,
      performance,
      usb_always_dfu,
      usb_max_bulk_in_queue_length);
}
// Object-API overload: packs a native CoralSettingsT.
flatbuffers::Offset<CoralSettings> CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the CPUSettings table.
struct CPUSettingsT : public flatbuffers::NativeTable {
  typedef CPUSettings TableType;
  int32_t num_threads;
  CPUSettingsT()
      : num_threads(0) {
  }
};
// Flat (in-buffer) CPUSettings table: single num_threads field.
struct CPUSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef CPUSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NUM_THREADS = 4
  };
  int32_t num_threads() const {
    return GetField<int32_t>(VT_NUM_THREADS, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_NUM_THREADS) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  CPUSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<CPUSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for CPUSettings.
struct CPUSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_num_threads(int32_t num_threads) {
    fbb_.AddElement<int32_t>(CPUSettings::VT_NUM_THREADS, num_threads, 0);
  }
  explicit CPUSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  CPUSettingsBuilder &operator=(const CPUSettingsBuilder &);
  flatbuffers::Offset<CPUSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<CPUSettings>(end);
    return o;
  }
};
// Convenience helper: builds a CPUSettings table from field values.
inline flatbuffers::Offset<CPUSettings> CreateCPUSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    int32_t num_threads = 0) {
  CPUSettingsBuilder builder_(_fbb);
  builder_.add_num_threads(num_threads);
  return builder_.Finish();
}
// Object-API overload: packs a native CPUSettingsT.
flatbuffers::Offset<CPUSettings> CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the TFLiteSettings table: top-level
// per-delegate configuration. Each *_settings sub-table is owned via
// unique_ptr; `delegate` selects which one applies.
struct TFLiteSettingsT : public flatbuffers::NativeTable {
  typedef TFLiteSettings TableType;
  tflite::Delegate delegate;
  std::unique_ptr<tflite::NNAPISettingsT> nnapi_settings;
  std::unique_ptr<tflite::GPUSettingsT> gpu_settings;
  std::unique_ptr<tflite::HexagonSettingsT> hexagon_settings;
  std::unique_ptr<tflite::XNNPackSettingsT> xnnpack_settings;
  std::unique_ptr<tflite::CPUSettingsT> cpu_settings;
  int32_t max_delegated_partitions;
  std::unique_ptr<tflite::EdgeTpuSettingsT> edgetpu_settings;
  std::unique_ptr<tflite::CoralSettingsT> coral_settings;
  std::unique_ptr<tflite::FallbackSettingsT> fallback_settings;
  TFLiteSettingsT()
      : delegate(tflite::Delegate_NONE),
        max_delegated_partitions(0) {
  }
};
// Flat (in-buffer) TFLiteSettings table.
struct TFLiteSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef TFLiteSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_DELEGATE = 4,
    VT_NNAPI_SETTINGS = 6,
    VT_GPU_SETTINGS = 8,
    VT_HEXAGON_SETTINGS = 10,
    VT_XNNPACK_SETTINGS = 12,
    VT_CPU_SETTINGS = 14,
    VT_MAX_DELEGATED_PARTITIONS = 16,
    VT_EDGETPU_SETTINGS = 18,
    VT_CORAL_SETTINGS = 20,
    VT_FALLBACK_SETTINGS = 22
  };
  tflite::Delegate delegate() const {
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_DELEGATE, 0));
  }
  // Each sub-table accessor returns nullptr when that field is absent.
  const tflite::NNAPISettings *nnapi_settings() const {
    return GetPointer<const tflite::NNAPISettings *>(VT_NNAPI_SETTINGS);
  }
  const tflite::GPUSettings *gpu_settings() const {
    return GetPointer<const tflite::GPUSettings *>(VT_GPU_SETTINGS);
  }
  const tflite::HexagonSettings *hexagon_settings() const {
    return GetPointer<const tflite::HexagonSettings *>(VT_HEXAGON_SETTINGS);
  }
  const tflite::XNNPackSettings *xnnpack_settings() const {
    return GetPointer<const tflite::XNNPackSettings *>(VT_XNNPACK_SETTINGS);
  }
  const tflite::CPUSettings *cpu_settings() const {
    return GetPointer<const tflite::CPUSettings *>(VT_CPU_SETTINGS);
  }
  int32_t max_delegated_partitions() const {
    return GetField<int32_t>(VT_MAX_DELEGATED_PARTITIONS, 0);
  }
  const tflite::EdgeTpuSettings *edgetpu_settings() const {
    return GetPointer<const tflite::EdgeTpuSettings *>(VT_EDGETPU_SETTINGS);
  }
  const tflite::CoralSettings *coral_settings() const {
    return GetPointer<const tflite::CoralSettings *>(VT_CORAL_SETTINGS);
  }
  const tflite::FallbackSettings *fallback_settings() const {
    return GetPointer<const tflite::FallbackSettings *>(VT_FALLBACK_SETTINGS);
  }
  // Structural validation, recursing into every nested sub-table.
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_DELEGATE) &&
           VerifyOffset(verifier, VT_NNAPI_SETTINGS) &&
           verifier.VerifyTable(nnapi_settings()) &&
           VerifyOffset(verifier, VT_GPU_SETTINGS) &&
           verifier.VerifyTable(gpu_settings()) &&
           VerifyOffset(verifier, VT_HEXAGON_SETTINGS) &&
           verifier.VerifyTable(hexagon_settings()) &&
           VerifyOffset(verifier, VT_XNNPACK_SETTINGS) &&
           verifier.VerifyTable(xnnpack_settings()) &&
           VerifyOffset(verifier, VT_CPU_SETTINGS) &&
           verifier.VerifyTable(cpu_settings()) &&
           VerifyField<int32_t>(verifier, VT_MAX_DELEGATED_PARTITIONS) &&
           VerifyOffset(verifier, VT_EDGETPU_SETTINGS) &&
           verifier.VerifyTable(edgetpu_settings()) &&
           VerifyOffset(verifier, VT_CORAL_SETTINGS) &&
           verifier.VerifyTable(coral_settings()) &&
           VerifyOffset(verifier, VT_FALLBACK_SETTINGS) &&
           verifier.VerifyTable(fallback_settings()) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  TFLiteSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<TFLiteSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for TFLiteSettings.
struct TFLiteSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_delegate(tflite::Delegate delegate) {
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_DELEGATE, static_cast<int32_t>(delegate), 0);
  }
  void add_nnapi_settings(flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_NNAPI_SETTINGS, nnapi_settings);
  }
  void add_gpu_settings(flatbuffers::Offset<tflite::GPUSettings> gpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_GPU_SETTINGS, gpu_settings);
  }
  void add_hexagon_settings(flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_HEXAGON_SETTINGS, hexagon_settings);
  }
  void add_xnnpack_settings(flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_XNNPACK_SETTINGS, xnnpack_settings);
  }
  void add_cpu_settings(flatbuffers::Offset<tflite::CPUSettings> cpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CPU_SETTINGS, cpu_settings);
  }
  void add_max_delegated_partitions(int32_t max_delegated_partitions) {
    fbb_.AddElement<int32_t>(TFLiteSettings::VT_MAX_DELEGATED_PARTITIONS, max_delegated_partitions, 0);
  }
  void add_edgetpu_settings(flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_EDGETPU_SETTINGS, edgetpu_settings);
  }
  void add_coral_settings(flatbuffers::Offset<tflite::CoralSettings> coral_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_CORAL_SETTINGS, coral_settings);
  }
  void add_fallback_settings(flatbuffers::Offset<tflite::FallbackSettings> fallback_settings) {
    fbb_.AddOffset(TFLiteSettings::VT_FALLBACK_SETTINGS, fallback_settings);
  }
  explicit TFLiteSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  TFLiteSettingsBuilder &operator=(const TFLiteSettingsBuilder &);
  flatbuffers::Offset<TFLiteSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<TFLiteSettings>(end);
    return o;
  }
};
// Convenience helper: builds a TFLiteSettings table; every sub-table offset
// must already be serialized into the same builder. add_* order is the
// generator's; do not reorder (it affects serialized layout).
inline flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate delegate = tflite::Delegate_NONE,
    flatbuffers::Offset<tflite::NNAPISettings> nnapi_settings = 0,
    flatbuffers::Offset<tflite::GPUSettings> gpu_settings = 0,
    flatbuffers::Offset<tflite::HexagonSettings> hexagon_settings = 0,
    flatbuffers::Offset<tflite::XNNPackSettings> xnnpack_settings = 0,
    flatbuffers::Offset<tflite::CPUSettings> cpu_settings = 0,
    int32_t max_delegated_partitions = 0,
    flatbuffers::Offset<tflite::EdgeTpuSettings> edgetpu_settings = 0,
    flatbuffers::Offset<tflite::CoralSettings> coral_settings = 0,
    flatbuffers::Offset<tflite::FallbackSettings> fallback_settings = 0) {
  TFLiteSettingsBuilder builder_(_fbb);
  builder_.add_fallback_settings(fallback_settings);
  builder_.add_coral_settings(coral_settings);
  builder_.add_edgetpu_settings(edgetpu_settings);
  builder_.add_max_delegated_partitions(max_delegated_partitions);
  builder_.add_cpu_settings(cpu_settings);
  builder_.add_xnnpack_settings(xnnpack_settings);
  builder_.add_hexagon_settings(hexagon_settings);
  builder_.add_gpu_settings(gpu_settings);
  builder_.add_nnapi_settings(nnapi_settings);
  builder_.add_delegate(delegate);
  return builder_.Finish();
}
// Object-API overload: packs a native TFLiteSettingsT.
flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// Native (object-API) mirror of the FallbackSettings table: flags that
// control automatic CPU fallback on delegate errors.
struct FallbackSettingsT : public flatbuffers::NativeTable {
  typedef FallbackSettings TableType;
  bool allow_automatic_fallback_on_compilation_error;
  bool allow_automatic_fallback_on_execution_error;
  FallbackSettingsT()
      : allow_automatic_fallback_on_compilation_error(false),
        allow_automatic_fallback_on_execution_error(false) {
  }
};
// Flat (in-buffer) FallbackSettings table.
struct FallbackSettings FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef FallbackSettingsT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR = 4,
    VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR = 6
  };
  bool allow_automatic_fallback_on_compilation_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, 0) != 0;
  }
  bool allow_automatic_fallback_on_execution_error() const {
    return GetField<uint8_t>(VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, 0) != 0;
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR) &&
           VerifyField<uint8_t>(verifier, VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR) &&
           verifier.EndTable();
  }
  // Object-API conversions (defined out of line in the generated sources).
  FallbackSettingsT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(FallbackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<FallbackSettings> Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for FallbackSettings.
struct FallbackSettingsBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_allow_automatic_fallback_on_compilation_error(bool allow_automatic_fallback_on_compilation_error) {
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_COMPILATION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_compilation_error), 0);
  }
  void add_allow_automatic_fallback_on_execution_error(bool allow_automatic_fallback_on_execution_error) {
    fbb_.AddElement<uint8_t>(FallbackSettings::VT_ALLOW_AUTOMATIC_FALLBACK_ON_EXECUTION_ERROR, static_cast<uint8_t>(allow_automatic_fallback_on_execution_error), 0);
  }
  explicit FallbackSettingsBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  // Declared but not defined: suppresses assignment (pre-C++11 idiom).
  FallbackSettingsBuilder &operator=(const FallbackSettingsBuilder &);
  flatbuffers::Offset<FallbackSettings> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<FallbackSettings>(end);
    return o;
  }
};
// Convenience helper: builds a FallbackSettings table from field values.
inline flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(
    flatbuffers::FlatBufferBuilder &_fbb,
    bool allow_automatic_fallback_on_compilation_error = false,
    bool allow_automatic_fallback_on_execution_error = false) {
  FallbackSettingsBuilder builder_(_fbb);
  builder_.add_allow_automatic_fallback_on_execution_error(allow_automatic_fallback_on_execution_error);
  builder_.add_allow_automatic_fallback_on_compilation_error(allow_automatic_fallback_on_compilation_error);
  return builder_.Finish();
}
// Object-API overload: packs a native FallbackSettingsT.
flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// ---------------------------------------------------------------------------
// BenchmarkMetric: a named series of float values produced by a benchmark.
// FlatBuffers-generated code.
// ---------------------------------------------------------------------------
// Object API mirror of the BenchmarkMetric table.
struct BenchmarkMetricT : public flatbuffers::NativeTable {
  typedef BenchmarkMetric TableType;
  std::string name;
  std::vector<float> values;
  BenchmarkMetricT() {
  }
};
// Read-only accessor over an in-buffer BenchmarkMetric table.
struct BenchmarkMetric FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkMetricT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_NAME = 4,
    VT_VALUES = 6
  };
  // May return nullptr when the field is absent from the buffer.
  const flatbuffers::String *name() const {
    return GetPointer<const flatbuffers::String *>(VT_NAME);
  }
  const flatbuffers::Vector<float> *values() const {
    return GetPointer<const flatbuffers::Vector<float> *>(VT_VALUES);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_NAME) &&
           verifier.VerifyString(name()) &&
           VerifyOffset(verifier, VT_VALUES) &&
           verifier.VerifyVector(values()) &&
           verifier.EndTable();
  }
  BenchmarkMetricT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkMetric> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for BenchmarkMetric.
struct BenchmarkMetricBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_name(flatbuffers::Offset<flatbuffers::String> name) {
    fbb_.AddOffset(BenchmarkMetric::VT_NAME, name);
  }
  void add_values(flatbuffers::Offset<flatbuffers::Vector<float>> values) {
    fbb_.AddOffset(BenchmarkMetric::VT_VALUES, values);
  }
  explicit BenchmarkMetricBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  BenchmarkMetricBuilder &operator=(const BenchmarkMetricBuilder &);
  flatbuffers::Offset<BenchmarkMetric> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkMetric>(end);
    return o;
  }
};
// Convenience constructor taking pre-serialized child offsets.
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::String> name = 0,
    flatbuffers::Offset<flatbuffers::Vector<float>> values = 0) {
  BenchmarkMetricBuilder builder_(_fbb);
  builder_.add_values(values);
  builder_.add_name(name);
  return builder_.Finish();
}
// "Direct" variant: serializes the raw C++ string/vector arguments first.
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetricDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const char *name = nullptr,
    const std::vector<float> *values = nullptr) {
  auto name__ = name ? _fbb.CreateString(name) : 0;
  auto values__ = values ? _fbb.CreateVector<float>(*values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      name__,
      values__);
}
// Object-API pack entry point; defined later in this file.
flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// ---------------------------------------------------------------------------
// BenchmarkResult: timings, memory use, success flag, and extra metrics
// from one benchmark run. FlatBuffers-generated code.
// ---------------------------------------------------------------------------
// Object API mirror of the BenchmarkResult table; owns its nested metrics.
struct BenchmarkResultT : public flatbuffers::NativeTable {
  typedef BenchmarkResult TableType;
  std::vector<int64_t> initialization_time_us;
  std::vector<int64_t> inference_time_us;
  int32_t max_memory_kb;
  bool ok;
  std::vector<std::unique_ptr<tflite::BenchmarkMetricT>> metrics;
  BenchmarkResultT()
      : max_memory_kb(0),
        ok(false) {
  }
};
// Read-only accessor over an in-buffer BenchmarkResult table.
struct BenchmarkResult FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkResultT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_INITIALIZATION_TIME_US = 4,
    VT_INFERENCE_TIME_US = 6,
    VT_MAX_MEMORY_KB = 8,
    VT_OK = 10,
    VT_METRICS = 12
  };
  // Vector/pointer accessors may return nullptr when the field is absent.
  const flatbuffers::Vector<int64_t> *initialization_time_us() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INITIALIZATION_TIME_US);
  }
  const flatbuffers::Vector<int64_t> *inference_time_us() const {
    return GetPointer<const flatbuffers::Vector<int64_t> *>(VT_INFERENCE_TIME_US);
  }
  int32_t max_memory_kb() const {
    return GetField<int32_t>(VT_MAX_MEMORY_KB, 0);
  }
  bool ok() const {
    return GetField<uint8_t>(VT_OK, 0) != 0;
  }
  const flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *>(VT_METRICS);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_INITIALIZATION_TIME_US) &&
           verifier.VerifyVector(initialization_time_us()) &&
           VerifyOffset(verifier, VT_INFERENCE_TIME_US) &&
           verifier.VerifyVector(inference_time_us()) &&
           VerifyField<int32_t>(verifier, VT_MAX_MEMORY_KB) &&
           VerifyField<uint8_t>(verifier, VT_OK) &&
           VerifyOffset(verifier, VT_METRICS) &&
           verifier.VerifyVector(metrics()) &&
           verifier.VerifyVectorOfTables(metrics()) &&
           verifier.EndTable();
  }
  BenchmarkResultT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkResult> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for BenchmarkResult.
struct BenchmarkResultBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_initialization_time_us(flatbuffers::Offset<flatbuffers::Vector<int64_t>> initialization_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INITIALIZATION_TIME_US, initialization_time_us);
  }
  void add_inference_time_us(flatbuffers::Offset<flatbuffers::Vector<int64_t>> inference_time_us) {
    fbb_.AddOffset(BenchmarkResult::VT_INFERENCE_TIME_US, inference_time_us);
  }
  void add_max_memory_kb(int32_t max_memory_kb) {
    fbb_.AddElement<int32_t>(BenchmarkResult::VT_MAX_MEMORY_KB, max_memory_kb, 0);
  }
  void add_ok(bool ok) {
    fbb_.AddElement<uint8_t>(BenchmarkResult::VT_OK, static_cast<uint8_t>(ok), 0);
  }
  void add_metrics(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics) {
    fbb_.AddOffset(BenchmarkResult::VT_METRICS, metrics);
  }
  explicit BenchmarkResultBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  BenchmarkResultBuilder &operator=(const BenchmarkResultBuilder &);
  flatbuffers::Offset<BenchmarkResult> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkResult>(end);
    return o;
  }
};
// Convenience constructor. Field-add order is generator-chosen; keep as-is.
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> initialization_time_us = 0,
    flatbuffers::Offset<flatbuffers::Vector<int64_t>> inference_time_us = 0,
    int32_t max_memory_kb = 0,
    bool ok = false,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::BenchmarkMetric>>> metrics = 0) {
  BenchmarkResultBuilder builder_(_fbb);
  builder_.add_metrics(metrics);
  builder_.add_max_memory_kb(max_memory_kb);
  builder_.add_inference_time_us(inference_time_us);
  builder_.add_initialization_time_us(initialization_time_us);
  builder_.add_ok(ok);
  return builder_.Finish();
}
// "Direct" variant: serializes raw vectors before building the table.
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResultDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    const std::vector<int64_t> *initialization_time_us = nullptr,
    const std::vector<int64_t> *inference_time_us = nullptr,
    int32_t max_memory_kb = 0,
    bool ok = false,
    const std::vector<flatbuffers::Offset<tflite::BenchmarkMetric>> *metrics = nullptr) {
  auto initialization_time_us__ = initialization_time_us ? _fbb.CreateVector<int64_t>(*initialization_time_us) : 0;
  auto inference_time_us__ = inference_time_us ? _fbb.CreateVector<int64_t>(*inference_time_us) : 0;
  auto metrics__ = metrics ? _fbb.CreateVector<flatbuffers::Offset<tflite::BenchmarkMetric>>(*metrics) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      initialization_time_us__,
      inference_time_us__,
      max_memory_kb,
      ok,
      metrics__);
}
// Object-API pack entry point; defined later in this file.
flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// ---------------------------------------------------------------------------
// ErrorCode: an error attributed to a delegate, carrying both the TFLite
// error and the underlying API's own error value. FlatBuffers-generated code.
// ---------------------------------------------------------------------------
// Object API mirror of the ErrorCode table.
struct ErrorCodeT : public flatbuffers::NativeTable {
  typedef ErrorCode TableType;
  tflite::Delegate source;
  int32_t tflite_error;
  int64_t underlying_api_error;
  ErrorCodeT()
      : source(tflite::Delegate_NONE),
        tflite_error(0),
        underlying_api_error(0) {
  }
};
// Read-only accessor over an in-buffer ErrorCode table.
struct ErrorCode FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef ErrorCodeT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_SOURCE = 4,
    VT_TFLITE_ERROR = 6,
    VT_UNDERLYING_API_ERROR = 8
  };
  // Enum is stored as its int32_t underlying value.
  tflite::Delegate source() const {
    return static_cast<tflite::Delegate>(GetField<int32_t>(VT_SOURCE, 0));
  }
  int32_t tflite_error() const {
    return GetField<int32_t>(VT_TFLITE_ERROR, 0);
  }
  int64_t underlying_api_error() const {
    return GetField<int64_t>(VT_UNDERLYING_API_ERROR, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_SOURCE) &&
           VerifyField<int32_t>(verifier, VT_TFLITE_ERROR) &&
           VerifyField<int64_t>(verifier, VT_UNDERLYING_API_ERROR) &&
           verifier.EndTable();
  }
  ErrorCodeT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<ErrorCode> Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for ErrorCode.
struct ErrorCodeBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_source(tflite::Delegate source) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_SOURCE, static_cast<int32_t>(source), 0);
  }
  void add_tflite_error(int32_t tflite_error) {
    fbb_.AddElement<int32_t>(ErrorCode::VT_TFLITE_ERROR, tflite_error, 0);
  }
  void add_underlying_api_error(int64_t underlying_api_error) {
    fbb_.AddElement<int64_t>(ErrorCode::VT_UNDERLYING_API_ERROR, underlying_api_error, 0);
  }
  explicit ErrorCodeBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  ErrorCodeBuilder &operator=(const ErrorCodeBuilder &);
  flatbuffers::Offset<ErrorCode> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<ErrorCode>(end);
    return o;
  }
};
// Convenience constructor; 64-bit field added first (alignment ordering).
inline flatbuffers::Offset<ErrorCode> CreateErrorCode(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::Delegate source = tflite::Delegate_NONE,
    int32_t tflite_error = 0,
    int64_t underlying_api_error = 0) {
  ErrorCodeBuilder builder_(_fbb);
  builder_.add_underlying_api_error(underlying_api_error);
  builder_.add_tflite_error(tflite_error);
  builder_.add_source(source);
  return builder_.Finish();
}
// Object-API pack entry point; defined later in this file.
flatbuffers::Offset<ErrorCode> CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// ---------------------------------------------------------------------------
// BenchmarkError: describes a benchmark failure — which stage it occurred
// in, process exit code/signal, and delegate error codes.
// FlatBuffers-generated code.
// ---------------------------------------------------------------------------
// Object API mirror of the BenchmarkError table; owns its error_code entries.
struct BenchmarkErrorT : public flatbuffers::NativeTable {
  typedef BenchmarkError TableType;
  tflite::BenchmarkStage stage;
  int32_t exit_code;
  int32_t signal;
  std::vector<std::unique_ptr<tflite::ErrorCodeT>> error_code;
  BenchmarkErrorT()
      : stage(tflite::BenchmarkStage_UNKNOWN),
        exit_code(0),
        signal(0) {
  }
};
// Read-only accessor over an in-buffer BenchmarkError table.
struct BenchmarkError FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkErrorT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_STAGE = 4,
    VT_EXIT_CODE = 6,
    VT_SIGNAL = 8,
    VT_ERROR_CODE = 10
  };
  tflite::BenchmarkStage stage() const {
    return static_cast<tflite::BenchmarkStage>(GetField<int32_t>(VT_STAGE, 0));
  }
  int32_t exit_code() const {
    return GetField<int32_t>(VT_EXIT_CODE, 0);
  }
  int32_t signal() const {
    return GetField<int32_t>(VT_SIGNAL, 0);
  }
  // May return nullptr when the field is absent from the buffer.
  const flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>> *error_code() const {
    return GetPointer<const flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>> *>(VT_ERROR_CODE);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyField<int32_t>(verifier, VT_STAGE) &&
           VerifyField<int32_t>(verifier, VT_EXIT_CODE) &&
           VerifyField<int32_t>(verifier, VT_SIGNAL) &&
           VerifyOffset(verifier, VT_ERROR_CODE) &&
           verifier.VerifyVector(error_code()) &&
           verifier.VerifyVectorOfTables(error_code()) &&
           verifier.EndTable();
  }
  BenchmarkErrorT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkError> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for BenchmarkError.
struct BenchmarkErrorBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_stage(tflite::BenchmarkStage stage) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_STAGE, static_cast<int32_t>(stage), 0);
  }
  void add_exit_code(int32_t exit_code) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_EXIT_CODE, exit_code, 0);
  }
  void add_signal(int32_t signal) {
    fbb_.AddElement<int32_t>(BenchmarkError::VT_SIGNAL, signal, 0);
  }
  void add_error_code(flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>>> error_code) {
    fbb_.AddOffset(BenchmarkError::VT_ERROR_CODE, error_code);
  }
  explicit BenchmarkErrorBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  BenchmarkErrorBuilder &operator=(const BenchmarkErrorBuilder &);
  flatbuffers::Offset<BenchmarkError> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkError>(end);
    return o;
  }
};
// Convenience constructor. Field-add order is generator-chosen; keep as-is.
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    flatbuffers::Offset<flatbuffers::Vector<flatbuffers::Offset<tflite::ErrorCode>>> error_code = 0) {
  BenchmarkErrorBuilder builder_(_fbb);
  builder_.add_error_code(error_code);
  builder_.add_signal(signal);
  builder_.add_exit_code(exit_code);
  builder_.add_stage(stage);
  return builder_.Finish();
}
// "Direct" variant: serializes the raw error-code vector first.
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkErrorDirect(
    flatbuffers::FlatBufferBuilder &_fbb,
    tflite::BenchmarkStage stage = tflite::BenchmarkStage_UNKNOWN,
    int32_t exit_code = 0,
    int32_t signal = 0,
    const std::vector<flatbuffers::Offset<tflite::ErrorCode>> *error_code = nullptr) {
  auto error_code__ = error_code ? _fbb.CreateVector<flatbuffers::Offset<tflite::ErrorCode>>(*error_code) : 0;
  return tflite::CreateBenchmarkError(
      _fbb,
      stage,
      exit_code,
      signal,
      error_code__);
}
// Object-API pack entry point; defined later in this file.
flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// ---------------------------------------------------------------------------
// BenchmarkEvent: one logged benchmark occurrence — the settings used, the
// event type, and either a result or an error, plus timestamps.
// FlatBuffers-generated code.
// ---------------------------------------------------------------------------
// Object API mirror of the BenchmarkEvent table; owns its nested tables.
struct BenchmarkEventT : public flatbuffers::NativeTable {
  typedef BenchmarkEvent TableType;
  std::unique_ptr<tflite::TFLiteSettingsT> tflite_settings;
  tflite::BenchmarkEventType event_type;
  std::unique_ptr<tflite::BenchmarkResultT> result;
  std::unique_ptr<tflite::BenchmarkErrorT> error;
  int64_t boottime_us;
  int64_t wallclock_us;
  BenchmarkEventT()
      : event_type(tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE),
        boottime_us(0),
        wallclock_us(0) {
  }
};
// Read-only accessor over an in-buffer BenchmarkEvent table.
struct BenchmarkEvent FLATBUFFERS_FINAL_CLASS : private flatbuffers::Table {
  typedef BenchmarkEventT NativeTableType;
  enum FlatBuffersVTableOffset FLATBUFFERS_VTABLE_UNDERLYING_TYPE {
    VT_TFLITE_SETTINGS = 4,
    VT_EVENT_TYPE = 6,
    VT_RESULT = 8,
    VT_ERROR = 10,
    VT_BOOTTIME_US = 12,
    VT_WALLCLOCK_US = 14
  };
  // Sub-table accessors may return nullptr when the field is absent.
  const tflite::TFLiteSettings *tflite_settings() const {
    return GetPointer<const tflite::TFLiteSettings *>(VT_TFLITE_SETTINGS);
  }
  tflite::BenchmarkEventType event_type() const {
    return static_cast<tflite::BenchmarkEventType>(GetField<int32_t>(VT_EVENT_TYPE, 0));
  }
  const tflite::BenchmarkResult *result() const {
    return GetPointer<const tflite::BenchmarkResult *>(VT_RESULT);
  }
  const tflite::BenchmarkError *error() const {
    return GetPointer<const tflite::BenchmarkError *>(VT_ERROR);
  }
  int64_t boottime_us() const {
    return GetField<int64_t>(VT_BOOTTIME_US, 0);
  }
  int64_t wallclock_us() const {
    return GetField<int64_t>(VT_WALLCLOCK_US, 0);
  }
  bool Verify(flatbuffers::Verifier &verifier) const {
    return VerifyTableStart(verifier) &&
           VerifyOffset(verifier, VT_TFLITE_SETTINGS) &&
           verifier.VerifyTable(tflite_settings()) &&
           VerifyField<int32_t>(verifier, VT_EVENT_TYPE) &&
           VerifyOffset(verifier, VT_RESULT) &&
           verifier.VerifyTable(result()) &&
           VerifyOffset(verifier, VT_ERROR) &&
           verifier.VerifyTable(error()) &&
           VerifyField<int64_t>(verifier, VT_BOOTTIME_US) &&
           VerifyField<int64_t>(verifier, VT_WALLCLOCK_US) &&
           verifier.EndTable();
  }
  BenchmarkEventT *UnPack(const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  void UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver = nullptr) const;
  static flatbuffers::Offset<BenchmarkEvent> Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
};
// Incremental builder for BenchmarkEvent.
struct BenchmarkEventBuilder {
  flatbuffers::FlatBufferBuilder &fbb_;
  flatbuffers::uoffset_t start_;
  void add_tflite_settings(flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings) {
    fbb_.AddOffset(BenchmarkEvent::VT_TFLITE_SETTINGS, tflite_settings);
  }
  void add_event_type(tflite::BenchmarkEventType event_type) {
    fbb_.AddElement<int32_t>(BenchmarkEvent::VT_EVENT_TYPE, static_cast<int32_t>(event_type), 0);
  }
  void add_result(flatbuffers::Offset<tflite::BenchmarkResult> result) {
    fbb_.AddOffset(BenchmarkEvent::VT_RESULT, result);
  }
  void add_error(flatbuffers::Offset<tflite::BenchmarkError> error) {
    fbb_.AddOffset(BenchmarkEvent::VT_ERROR, error);
  }
  void add_boottime_us(int64_t boottime_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_BOOTTIME_US, boottime_us, 0);
  }
  void add_wallclock_us(int64_t wallclock_us) {
    fbb_.AddElement<int64_t>(BenchmarkEvent::VT_WALLCLOCK_US, wallclock_us, 0);
  }
  explicit BenchmarkEventBuilder(flatbuffers::FlatBufferBuilder &_fbb)
        : fbb_(_fbb) {
    start_ = fbb_.StartTable();
  }
  BenchmarkEventBuilder &operator=(const BenchmarkEventBuilder &);
  flatbuffers::Offset<BenchmarkEvent> Finish() {
    const auto end = fbb_.EndTable(start_);
    auto o = flatbuffers::Offset<BenchmarkEvent>(end);
    return o;
  }
};
// Convenience constructor; 64-bit fields added first (alignment ordering).
inline flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(
    flatbuffers::FlatBufferBuilder &_fbb,
    flatbuffers::Offset<tflite::TFLiteSettings> tflite_settings = 0,
    tflite::BenchmarkEventType event_type = tflite::BenchmarkEventType_UNDEFINED_BENCHMARK_EVENT_TYPE,
    flatbuffers::Offset<tflite::BenchmarkResult> result = 0,
    flatbuffers::Offset<tflite::BenchmarkError> error = 0,
    int64_t boottime_us = 0,
    int64_t wallclock_us = 0) {
  BenchmarkEventBuilder builder_(_fbb);
  builder_.add_wallclock_us(wallclock_us);
  builder_.add_boottime_us(boottime_us);
  builder_.add_error(error);
  builder_.add_result(result);
  builder_.add_event_type(event_type);
  builder_.add_tflite_settings(tflite_settings);
  return builder_.Finish();
}
// Object-API pack entry point; defined later in this file.
flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher = nullptr);
// --- ComputeSettingsT object-API implementation (generated) ---------------
// Equality: unique_ptr sub-tables compare equal when both null, both
// pointing at equal tables, or one null and the other default-valued.
inline bool operator==(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
  return
      (lhs.preference == rhs.preference) &&
      ((!lhs.tflite_settings && !rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings) || (lhs.tflite_settings && !rhs.tflite_settings && *lhs.tflite_settings == decltype(lhs.tflite_settings)::element_type()) || (rhs.tflite_settings && !lhs.tflite_settings && *rhs.tflite_settings == decltype(rhs.tflite_settings)::element_type())) &&
      (lhs.model_namespace_for_statistics == rhs.model_namespace_for_statistics) &&
      (lhs.model_identifier_for_statistics == rhs.model_identifier_for_statistics);
}
inline bool operator!=(const ComputeSettingsT &lhs, const ComputeSettingsT &rhs) {
  return !(lhs == rhs);
}
// Deserializes into a freshly allocated object; caller owns the result.
inline ComputeSettingsT *ComputeSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new ComputeSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each field from the flat table into *_o; absent pointer fields
// leave the corresponding member untouched.
inline void ComputeSettings::UnPackTo(ComputeSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = preference(); _o->preference = _e; }
  { auto _e = tflite_settings(); if (_e) _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = model_namespace_for_statistics(); if (_e) _o->model_namespace_for_statistics = _e->str(); }
  { auto _e = model_identifier_for_statistics(); if (_e) _o->model_identifier_for_statistics = _e->str(); }
}
inline flatbuffers::Offset<ComputeSettings> ComputeSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateComputeSettings(_fbb, _o, _rehasher);
}
// Serializes the object back into _fbb; empty strings/null sub-tables are
// omitted (offset 0) rather than written as empty fields.
inline flatbuffers::Offset<ComputeSettings> CreateComputeSettings(flatbuffers::FlatBufferBuilder &_fbb, const ComputeSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ComputeSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _preference = _o->preference;
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _model_namespace_for_statistics = _o->model_namespace_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_namespace_for_statistics);
  auto _model_identifier_for_statistics = _o->model_identifier_for_statistics.empty() ? 0 : _fbb.CreateString(_o->model_identifier_for_statistics);
  return tflite::CreateComputeSettings(
      _fbb,
      _preference,
      _tflite_settings,
      _model_namespace_for_statistics,
      _model_identifier_for_statistics);
}
// --- NNAPISettingsT object-API implementation (generated) -----------------
// Equality: fallback_settings compares via the null/equal/default-value
// rule used for all unique_ptr sub-tables in this file.
inline bool operator==(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
  return
      (lhs.accelerator_name == rhs.accelerator_name) &&
      (lhs.cache_directory == rhs.cache_directory) &&
      (lhs.model_token == rhs.model_token) &&
      (lhs.execution_preference == rhs.execution_preference) &&
      (lhs.no_of_nnapi_instances_to_cache == rhs.no_of_nnapi_instances_to_cache) &&
      ((!lhs.fallback_settings && !rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings) || (lhs.fallback_settings && !rhs.fallback_settings && *lhs.fallback_settings == decltype(lhs.fallback_settings)::element_type()) || (rhs.fallback_settings && !lhs.fallback_settings && *rhs.fallback_settings == decltype(rhs.fallback_settings)::element_type())) &&
      (lhs.allow_nnapi_cpu_on_android_10_plus == rhs.allow_nnapi_cpu_on_android_10_plus) &&
      (lhs.execution_priority == rhs.execution_priority) &&
      (lhs.allow_dynamic_dimensions == rhs.allow_dynamic_dimensions) &&
      (lhs.allow_fp16_precision_for_fp32 == rhs.allow_fp16_precision_for_fp32);
}
inline bool operator!=(const NNAPISettingsT &lhs, const NNAPISettingsT &rhs) {
  return !(lhs == rhs);
}
// Deserializes into a freshly allocated object; caller owns the result.
inline NNAPISettingsT *NNAPISettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new NNAPISettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each field from the flat table into *_o.
inline void NNAPISettings::UnPackTo(NNAPISettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = accelerator_name(); if (_e) _o->accelerator_name = _e->str(); }
  { auto _e = cache_directory(); if (_e) _o->cache_directory = _e->str(); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
  { auto _e = execution_preference(); _o->execution_preference = _e; }
  { auto _e = no_of_nnapi_instances_to_cache(); _o->no_of_nnapi_instances_to_cache = _e; }
  { auto _e = fallback_settings(); if (_e) _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = allow_nnapi_cpu_on_android_10_plus(); _o->allow_nnapi_cpu_on_android_10_plus = _e; }
  { auto _e = execution_priority(); _o->execution_priority = _e; }
  { auto _e = allow_dynamic_dimensions(); _o->allow_dynamic_dimensions = _e; }
  { auto _e = allow_fp16_precision_for_fp32(); _o->allow_fp16_precision_for_fp32 = _e; }
}
inline flatbuffers::Offset<NNAPISettings> NNAPISettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateNNAPISettings(_fbb, _o, _rehasher);
}
// Serializes the object back into _fbb; empty strings/null sub-tables are
// omitted (offset 0).
inline flatbuffers::Offset<NNAPISettings> CreateNNAPISettings(flatbuffers::FlatBufferBuilder &_fbb, const NNAPISettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const NNAPISettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _accelerator_name = _o->accelerator_name.empty() ? 0 : _fbb.CreateString(_o->accelerator_name);
  auto _cache_directory = _o->cache_directory.empty() ? 0 : _fbb.CreateString(_o->cache_directory);
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  auto _execution_preference = _o->execution_preference;
  auto _no_of_nnapi_instances_to_cache = _o->no_of_nnapi_instances_to_cache;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  auto _allow_nnapi_cpu_on_android_10_plus = _o->allow_nnapi_cpu_on_android_10_plus;
  auto _execution_priority = _o->execution_priority;
  auto _allow_dynamic_dimensions = _o->allow_dynamic_dimensions;
  auto _allow_fp16_precision_for_fp32 = _o->allow_fp16_precision_for_fp32;
  return tflite::CreateNNAPISettings(
      _fbb,
      _accelerator_name,
      _cache_directory,
      _model_token,
      _execution_preference,
      _no_of_nnapi_instances_to_cache,
      _fallback_settings,
      _allow_nnapi_cpu_on_android_10_plus,
      _execution_priority,
      _allow_dynamic_dimensions,
      _allow_fp16_precision_for_fp32);
}
// --- GPUSettingsT object-API implementation (generated) -------------------
// Memberwise equality over all scalar fields.
inline bool operator==(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
  return
      (lhs.is_precision_loss_allowed == rhs.is_precision_loss_allowed) &&
      (lhs.enable_quantized_inference == rhs.enable_quantized_inference) &&
      (lhs.force_backend == rhs.force_backend);
}
inline bool operator!=(const GPUSettingsT &lhs, const GPUSettingsT &rhs) {
  return !(lhs == rhs);
}
// Deserializes into a freshly allocated object; caller owns the result.
inline GPUSettingsT *GPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new GPUSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each scalar field from the flat table into *_o.
inline void GPUSettings::UnPackTo(GPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = is_precision_loss_allowed(); _o->is_precision_loss_allowed = _e; }
  { auto _e = enable_quantized_inference(); _o->enable_quantized_inference = _e; }
  { auto _e = force_backend(); _o->force_backend = _e; }
}
inline flatbuffers::Offset<GPUSettings> GPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateGPUSettings(_fbb, _o, _rehasher);
}
// Serializes the object back into _fbb.
inline flatbuffers::Offset<GPUSettings> CreateGPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const GPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const GPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _is_precision_loss_allowed = _o->is_precision_loss_allowed;
  auto _enable_quantized_inference = _o->enable_quantized_inference;
  auto _force_backend = _o->force_backend;
  return tflite::CreateGPUSettings(
      _fbb,
      _is_precision_loss_allowed,
      _enable_quantized_inference,
      _force_backend);
}
// --- HexagonSettingsT object-API implementation (generated) ---------------
// Memberwise equality over all scalar fields.
inline bool operator==(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
  return
      (lhs.debug_level == rhs.debug_level) &&
      (lhs.powersave_level == rhs.powersave_level) &&
      (lhs.print_graph_profile == rhs.print_graph_profile) &&
      (lhs.print_graph_debug == rhs.print_graph_debug);
}
inline bool operator!=(const HexagonSettingsT &lhs, const HexagonSettingsT &rhs) {
  return !(lhs == rhs);
}
// Deserializes into a freshly allocated object; caller owns the result.
inline HexagonSettingsT *HexagonSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new HexagonSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each scalar field from the flat table into *_o.
inline void HexagonSettings::UnPackTo(HexagonSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = debug_level(); _o->debug_level = _e; }
  { auto _e = powersave_level(); _o->powersave_level = _e; }
  { auto _e = print_graph_profile(); _o->print_graph_profile = _e; }
  { auto _e = print_graph_debug(); _o->print_graph_debug = _e; }
}
inline flatbuffers::Offset<HexagonSettings> HexagonSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateHexagonSettings(_fbb, _o, _rehasher);
}
// Serializes the object back into _fbb.
inline flatbuffers::Offset<HexagonSettings> CreateHexagonSettings(flatbuffers::FlatBufferBuilder &_fbb, const HexagonSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const HexagonSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _debug_level = _o->debug_level;
  auto _powersave_level = _o->powersave_level;
  auto _print_graph_profile = _o->print_graph_profile;
  auto _print_graph_debug = _o->print_graph_debug;
  return tflite::CreateHexagonSettings(
      _fbb,
      _debug_level,
      _powersave_level,
      _print_graph_profile,
      _print_graph_debug);
}
// --- XNNPackSettingsT object-API implementation (generated) ---------------
// Equality compares the single field, num_threads.
inline bool operator==(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads);
}
inline bool operator!=(const XNNPackSettingsT &lhs, const XNNPackSettingsT &rhs) {
  return !(lhs == rhs);
}
// Deserializes into a freshly allocated object; caller owns the result.
inline XNNPackSettingsT *XNNPackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new XNNPackSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
inline void XNNPackSettings::UnPackTo(XNNPackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
}
inline flatbuffers::Offset<XNNPackSettings> XNNPackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateXNNPackSettings(_fbb, _o, _rehasher);
}
// Serializes the object back into _fbb.
inline flatbuffers::Offset<XNNPackSettings> CreateXNNPackSettings(flatbuffers::FlatBufferBuilder &_fbb, const XNNPackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const XNNPackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  return tflite::CreateXNNPackSettings(
      _fbb,
      _num_threads);
}
// ---- Object-API helpers for EdgeTpuDeviceSpec (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
  return
      (lhs.platform_type == rhs.platform_type) &&
      (lhs.num_chips == rhs.num_chips) &&
      (lhs.device_paths == rhs.device_paths) &&
      (lhs.chip_family == rhs.chip_family);
}
inline bool operator!=(const EdgeTpuDeviceSpecT &lhs, const EdgeTpuDeviceSpecT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline EdgeTpuDeviceSpecT *EdgeTpuDeviceSpec::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuDeviceSpecT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o (device_paths is deep-copied to strings).
inline void EdgeTpuDeviceSpec::UnPackTo(EdgeTpuDeviceSpecT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = platform_type(); _o->platform_type = _e; }
  { auto _e = num_chips(); _o->num_chips = _e; }
  { auto _e = device_paths(); if (_e) { _o->device_paths.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->device_paths[_i] = _e->Get(_i)->str(); } } }
  { auto _e = chip_family(); _o->chip_family = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<EdgeTpuDeviceSpec> EdgeTpuDeviceSpec::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuDeviceSpec(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<EdgeTpuDeviceSpec> CreateEdgeTpuDeviceSpec(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuDeviceSpecT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuDeviceSpecT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _platform_type = _o->platform_type;
  auto _num_chips = _o->num_chips;
  // An empty vector is written as offset 0 (field absent), not an empty vector.
  auto _device_paths = _o->device_paths.size() ? _fbb.CreateVectorOfStrings(_o->device_paths) : 0;
  auto _chip_family = _o->chip_family;
  return tflite::CreateEdgeTpuDeviceSpec(
      _fbb,
      _platform_type,
      _num_chips,
      _device_paths,
      _chip_family);
}
// ---- Object-API helpers for EdgeTpuInactivePowerConfig (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
  return
      (lhs.inactive_power_state == rhs.inactive_power_state) &&
      (lhs.inactive_timeout_us == rhs.inactive_timeout_us);
}
inline bool operator!=(const EdgeTpuInactivePowerConfigT &lhs, const EdgeTpuInactivePowerConfigT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline EdgeTpuInactivePowerConfigT *EdgeTpuInactivePowerConfig::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuInactivePowerConfigT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o.
inline void EdgeTpuInactivePowerConfig::UnPackTo(EdgeTpuInactivePowerConfigT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inactive_power_state(); _o->inactive_power_state = _e; }
  { auto _e = inactive_timeout_us(); _o->inactive_timeout_us = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> EdgeTpuInactivePowerConfig::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuInactivePowerConfig(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<EdgeTpuInactivePowerConfig> CreateEdgeTpuInactivePowerConfig(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuInactivePowerConfigT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuInactivePowerConfigT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inactive_power_state = _o->inactive_power_state;
  auto _inactive_timeout_us = _o->inactive_timeout_us;
  return tflite::CreateEdgeTpuInactivePowerConfig(
      _fbb,
      _inactive_power_state,
      _inactive_timeout_us);
}
// ---- Object-API helpers for EdgeTpuSettings (flatc-generated) ----
// Field-wise equality. For unique_ptr sub-tables, two objects compare equal
// when both pointers are null, both point at equal values, or one is null and
// the other points at a default-constructed value (absent == default).
inline bool operator==(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
  return
      (lhs.inference_power_state == rhs.inference_power_state) &&
      (lhs.inactive_power_configs == rhs.inactive_power_configs) &&
      (lhs.inference_priority == rhs.inference_priority) &&
      ((!lhs.edgetpu_device_spec && !rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == *rhs.edgetpu_device_spec) || (lhs.edgetpu_device_spec && !rhs.edgetpu_device_spec && *lhs.edgetpu_device_spec == decltype(lhs.edgetpu_device_spec)::element_type()) || (rhs.edgetpu_device_spec && !lhs.edgetpu_device_spec && *rhs.edgetpu_device_spec == decltype(rhs.edgetpu_device_spec)::element_type())) &&
      (lhs.model_token == rhs.model_token);
}
inline bool operator!=(const EdgeTpuSettingsT &lhs, const EdgeTpuSettingsT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline EdgeTpuSettingsT *EdgeTpuSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new EdgeTpuSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; sub-tables are recursively unpacked.
inline void EdgeTpuSettings::UnPackTo(EdgeTpuSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = inference_power_state(); _o->inference_power_state = _e; }
  { auto _e = inactive_power_configs(); if (_e) { _o->inactive_power_configs.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inactive_power_configs[_i] = std::unique_ptr<tflite::EdgeTpuInactivePowerConfigT>(_e->Get(_i)->UnPack(_resolver)); } } }
  { auto _e = inference_priority(); _o->inference_priority = _e; }
  { auto _e = edgetpu_device_spec(); if (_e) _o->edgetpu_device_spec = std::unique_ptr<tflite::EdgeTpuDeviceSpecT>(_e->UnPack(_resolver)); }
  { auto _e = model_token(); if (_e) _o->model_token = _e->str(); }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<EdgeTpuSettings> EdgeTpuSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateEdgeTpuSettings(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<EdgeTpuSettings> CreateEdgeTpuSettings(flatbuffers::FlatBufferBuilder &_fbb, const EdgeTpuSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const EdgeTpuSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _inference_power_state = _o->inference_power_state;
  // Absent (empty) vectors/strings/sub-tables are written as offset 0.
  auto _inactive_power_configs = _o->inactive_power_configs.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::EdgeTpuInactivePowerConfig>> (_o->inactive_power_configs.size(), [](size_t i, _VectorArgs *__va) { return CreateEdgeTpuInactivePowerConfig(*__va->__fbb, __va->__o->inactive_power_configs[i].get(), __va->__rehasher); }, &_va ) : 0;
  auto _inference_priority = _o->inference_priority;
  auto _edgetpu_device_spec = _o->edgetpu_device_spec ? CreateEdgeTpuDeviceSpec(_fbb, _o->edgetpu_device_spec.get(), _rehasher) : 0;
  auto _model_token = _o->model_token.empty() ? 0 : _fbb.CreateString(_o->model_token);
  return tflite::CreateEdgeTpuSettings(
      _fbb,
      _inference_power_state,
      _inactive_power_configs,
      _inference_priority,
      _edgetpu_device_spec,
      _model_token);
}
// ---- Object-API helpers for CoralSettings (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
  return
      (lhs.device == rhs.device) &&
      (lhs.performance == rhs.performance) &&
      (lhs.usb_always_dfu == rhs.usb_always_dfu) &&
      (lhs.usb_max_bulk_in_queue_length == rhs.usb_max_bulk_in_queue_length);
}
inline bool operator!=(const CoralSettingsT &lhs, const CoralSettingsT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline CoralSettingsT *CoralSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CoralSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o.
inline void CoralSettings::UnPackTo(CoralSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = device(); if (_e) _o->device = _e->str(); }
  { auto _e = performance(); _o->performance = _e; }
  { auto _e = usb_always_dfu(); _o->usb_always_dfu = _e; }
  { auto _e = usb_max_bulk_in_queue_length(); _o->usb_max_bulk_in_queue_length = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<CoralSettings> CoralSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCoralSettings(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<CoralSettings> CreateCoralSettings(flatbuffers::FlatBufferBuilder &_fbb, const CoralSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CoralSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // An empty string is written as offset 0 (field absent).
  auto _device = _o->device.empty() ? 0 : _fbb.CreateString(_o->device);
  auto _performance = _o->performance;
  auto _usb_always_dfu = _o->usb_always_dfu;
  auto _usb_max_bulk_in_queue_length = _o->usb_max_bulk_in_queue_length;
  return tflite::CreateCoralSettings(
      _fbb,
      _device,
      _performance,
      _usb_always_dfu,
      _usb_max_bulk_in_queue_length);
}
// ---- Object-API helpers for CPUSettings (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
  return
      (lhs.num_threads == rhs.num_threads);
}
inline bool operator!=(const CPUSettingsT &lhs, const CPUSettingsT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline CPUSettingsT *CPUSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new CPUSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o.
inline void CPUSettings::UnPackTo(CPUSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = num_threads(); _o->num_threads = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<CPUSettings> CPUSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateCPUSettings(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<CPUSettings> CreateCPUSettings(flatbuffers::FlatBufferBuilder &_fbb, const CPUSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const CPUSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _num_threads = _o->num_threads;
  return tflite::CreateCPUSettings(
      _fbb,
      _num_threads);
}
// ---- Object-API helpers for TFLiteSettings (flatc-generated) ----
// Field-wise equality. Each unique_ptr sub-settings field compares equal when
// both sides are null, both point at equal values, or one side is null and
// the other holds a default-constructed value (absent == default).
inline bool operator==(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
  return
      (lhs.delegate == rhs.delegate) &&
      ((!lhs.nnapi_settings && !rhs.nnapi_settings) || (lhs.nnapi_settings && rhs.nnapi_settings && *lhs.nnapi_settings == *rhs.nnapi_settings) || (lhs.nnapi_settings && !rhs.nnapi_settings && *lhs.nnapi_settings == decltype(lhs.nnapi_settings)::element_type()) || (rhs.nnapi_settings && !lhs.nnapi_settings && *rhs.nnapi_settings == decltype(rhs.nnapi_settings)::element_type())) &&
      ((!lhs.gpu_settings && !rhs.gpu_settings) || (lhs.gpu_settings && rhs.gpu_settings && *lhs.gpu_settings == *rhs.gpu_settings) || (lhs.gpu_settings && !rhs.gpu_settings && *lhs.gpu_settings == decltype(lhs.gpu_settings)::element_type()) || (rhs.gpu_settings && !lhs.gpu_settings && *rhs.gpu_settings == decltype(rhs.gpu_settings)::element_type())) &&
      ((!lhs.hexagon_settings && !rhs.hexagon_settings) || (lhs.hexagon_settings && rhs.hexagon_settings && *lhs.hexagon_settings == *rhs.hexagon_settings) || (lhs.hexagon_settings && !rhs.hexagon_settings && *lhs.hexagon_settings == decltype(lhs.hexagon_settings)::element_type()) || (rhs.hexagon_settings && !lhs.hexagon_settings && *rhs.hexagon_settings == decltype(rhs.hexagon_settings)::element_type())) &&
      ((!lhs.xnnpack_settings && !rhs.xnnpack_settings) || (lhs.xnnpack_settings && rhs.xnnpack_settings && *lhs.xnnpack_settings == *rhs.xnnpack_settings) || (lhs.xnnpack_settings && !rhs.xnnpack_settings && *lhs.xnnpack_settings == decltype(lhs.xnnpack_settings)::element_type()) || (rhs.xnnpack_settings && !lhs.xnnpack_settings && *rhs.xnnpack_settings == decltype(rhs.xnnpack_settings)::element_type())) &&
      ((!lhs.cpu_settings && !rhs.cpu_settings) || (lhs.cpu_settings && rhs.cpu_settings && *lhs.cpu_settings == *rhs.cpu_settings) || (lhs.cpu_settings && !rhs.cpu_settings && *lhs.cpu_settings == decltype(lhs.cpu_settings)::element_type()) || (rhs.cpu_settings && !lhs.cpu_settings && *rhs.cpu_settings == decltype(rhs.cpu_settings)::element_type())) &&
      (lhs.max_delegated_partitions == rhs.max_delegated_partitions) &&
      ((!lhs.edgetpu_settings && !rhs.edgetpu_settings) || (lhs.edgetpu_settings && rhs.edgetpu_settings && *lhs.edgetpu_settings == *rhs.edgetpu_settings) || (lhs.edgetpu_settings && !rhs.edgetpu_settings && *lhs.edgetpu_settings == decltype(lhs.edgetpu_settings)::element_type()) || (rhs.edgetpu_settings && !lhs.edgetpu_settings && *rhs.edgetpu_settings == decltype(rhs.edgetpu_settings)::element_type())) &&
      ((!lhs.coral_settings && !rhs.coral_settings) || (lhs.coral_settings && rhs.coral_settings && *lhs.coral_settings == *rhs.coral_settings) || (lhs.coral_settings && !rhs.coral_settings && *lhs.coral_settings == decltype(lhs.coral_settings)::element_type()) || (rhs.coral_settings && !lhs.coral_settings && *rhs.coral_settings == decltype(rhs.coral_settings)::element_type())) &&
      ((!lhs.fallback_settings && !rhs.fallback_settings) || (lhs.fallback_settings && rhs.fallback_settings && *lhs.fallback_settings == *rhs.fallback_settings) || (lhs.fallback_settings && !rhs.fallback_settings && *lhs.fallback_settings == decltype(lhs.fallback_settings)::element_type()) || (rhs.fallback_settings && !lhs.fallback_settings && *rhs.fallback_settings == decltype(rhs.fallback_settings)::element_type()));
}
inline bool operator!=(const TFLiteSettingsT &lhs, const TFLiteSettingsT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline TFLiteSettingsT *TFLiteSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new TFLiteSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; sub-tables are recursively unpacked.
inline void TFLiteSettings::UnPackTo(TFLiteSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = delegate(); _o->delegate = _e; }
  { auto _e = nnapi_settings(); if (_e) _o->nnapi_settings = std::unique_ptr<tflite::NNAPISettingsT>(_e->UnPack(_resolver)); }
  { auto _e = gpu_settings(); if (_e) _o->gpu_settings = std::unique_ptr<tflite::GPUSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = hexagon_settings(); if (_e) _o->hexagon_settings = std::unique_ptr<tflite::HexagonSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = xnnpack_settings(); if (_e) _o->xnnpack_settings = std::unique_ptr<tflite::XNNPackSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = cpu_settings(); if (_e) _o->cpu_settings = std::unique_ptr<tflite::CPUSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = max_delegated_partitions(); _o->max_delegated_partitions = _e; }
  { auto _e = edgetpu_settings(); if (_e) _o->edgetpu_settings = std::unique_ptr<tflite::EdgeTpuSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = coral_settings(); if (_e) _o->coral_settings = std::unique_ptr<tflite::CoralSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = fallback_settings(); if (_e) _o->fallback_settings = std::unique_ptr<tflite::FallbackSettingsT>(_e->UnPack(_resolver)); }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<TFLiteSettings> TFLiteSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateTFLiteSettings(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<TFLiteSettings> CreateTFLiteSettings(flatbuffers::FlatBufferBuilder &_fbb, const TFLiteSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const TFLiteSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _delegate = _o->delegate;
  // Null sub-settings are written as offset 0 (field absent in the table).
  auto _nnapi_settings = _o->nnapi_settings ? CreateNNAPISettings(_fbb, _o->nnapi_settings.get(), _rehasher) : 0;
  auto _gpu_settings = _o->gpu_settings ? CreateGPUSettings(_fbb, _o->gpu_settings.get(), _rehasher) : 0;
  auto _hexagon_settings = _o->hexagon_settings ? CreateHexagonSettings(_fbb, _o->hexagon_settings.get(), _rehasher) : 0;
  auto _xnnpack_settings = _o->xnnpack_settings ? CreateXNNPackSettings(_fbb, _o->xnnpack_settings.get(), _rehasher) : 0;
  auto _cpu_settings = _o->cpu_settings ? CreateCPUSettings(_fbb, _o->cpu_settings.get(), _rehasher) : 0;
  auto _max_delegated_partitions = _o->max_delegated_partitions;
  auto _edgetpu_settings = _o->edgetpu_settings ? CreateEdgeTpuSettings(_fbb, _o->edgetpu_settings.get(), _rehasher) : 0;
  auto _coral_settings = _o->coral_settings ? CreateCoralSettings(_fbb, _o->coral_settings.get(), _rehasher) : 0;
  auto _fallback_settings = _o->fallback_settings ? CreateFallbackSettings(_fbb, _o->fallback_settings.get(), _rehasher) : 0;
  return tflite::CreateTFLiteSettings(
      _fbb,
      _delegate,
      _nnapi_settings,
      _gpu_settings,
      _hexagon_settings,
      _xnnpack_settings,
      _cpu_settings,
      _max_delegated_partitions,
      _edgetpu_settings,
      _coral_settings,
      _fallback_settings);
}
// ---- Object-API helpers for FallbackSettings (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
  return
      (lhs.allow_automatic_fallback_on_compilation_error == rhs.allow_automatic_fallback_on_compilation_error) &&
      (lhs.allow_automatic_fallback_on_execution_error == rhs.allow_automatic_fallback_on_execution_error);
}
inline bool operator!=(const FallbackSettingsT &lhs, const FallbackSettingsT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline FallbackSettingsT *FallbackSettings::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new FallbackSettingsT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o.
inline void FallbackSettings::UnPackTo(FallbackSettingsT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = allow_automatic_fallback_on_compilation_error(); _o->allow_automatic_fallback_on_compilation_error = _e; }
  { auto _e = allow_automatic_fallback_on_execution_error(); _o->allow_automatic_fallback_on_execution_error = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<FallbackSettings> FallbackSettings::Pack(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateFallbackSettings(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<FallbackSettings> CreateFallbackSettings(flatbuffers::FlatBufferBuilder &_fbb, const FallbackSettingsT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const FallbackSettingsT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _allow_automatic_fallback_on_compilation_error = _o->allow_automatic_fallback_on_compilation_error;
  auto _allow_automatic_fallback_on_execution_error = _o->allow_automatic_fallback_on_execution_error;
  return tflite::CreateFallbackSettings(
      _fbb,
      _allow_automatic_fallback_on_compilation_error,
      _allow_automatic_fallback_on_execution_error);
}
// ---- Object-API helpers for BenchmarkMetric (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
  return
      (lhs.name == rhs.name) &&
      (lhs.values == rhs.values);
}
inline bool operator!=(const BenchmarkMetricT &lhs, const BenchmarkMetricT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline BenchmarkMetricT *BenchmarkMetric::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkMetricT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; `values` is copied element by element.
inline void BenchmarkMetric::UnPackTo(BenchmarkMetricT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = name(); if (_e) _o->name = _e->str(); }
  { auto _e = values(); if (_e) { _o->values.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->values[_i] = _e->Get(_i); } } }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<BenchmarkMetric> BenchmarkMetric::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkMetric(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<BenchmarkMetric> CreateBenchmarkMetric(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkMetricT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkMetricT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // Empty string/vector fields are written as offset 0 (field absent).
  auto _name = _o->name.empty() ? 0 : _fbb.CreateString(_o->name);
  auto _values = _o->values.size() ? _fbb.CreateVector(_o->values) : 0;
  return tflite::CreateBenchmarkMetric(
      _fbb,
      _name,
      _values);
}
// ---- Object-API helpers for BenchmarkResult (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) {
  return
      (lhs.initialization_time_us == rhs.initialization_time_us) &&
      (lhs.inference_time_us == rhs.inference_time_us) &&
      (lhs.max_memory_kb == rhs.max_memory_kb) &&
      (lhs.ok == rhs.ok) &&
      (lhs.metrics == rhs.metrics);
}
inline bool operator!=(const BenchmarkResultT &lhs, const BenchmarkResultT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline BenchmarkResultT *BenchmarkResult::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkResultT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; `metrics` sub-tables are deep-unpacked.
inline void BenchmarkResult::UnPackTo(BenchmarkResultT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = initialization_time_us(); if (_e) { _o->initialization_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->initialization_time_us[_i] = _e->Get(_i); } } }
  { auto _e = inference_time_us(); if (_e) { _o->inference_time_us.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->inference_time_us[_i] = _e->Get(_i); } } }
  { auto _e = max_memory_kb(); _o->max_memory_kb = _e; }
  { auto _e = ok(); _o->ok = _e; }
  { auto _e = metrics(); if (_e) { _o->metrics.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->metrics[_i] = std::unique_ptr<tflite::BenchmarkMetricT>(_e->Get(_i)->UnPack(_resolver)); } } }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<BenchmarkResult> BenchmarkResult::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkResult(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<BenchmarkResult> CreateBenchmarkResult(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkResultT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkResultT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // Empty vectors are written as offset 0 (field absent).
  auto _initialization_time_us = _o->initialization_time_us.size() ? _fbb.CreateVector(_o->initialization_time_us) : 0;
  auto _inference_time_us = _o->inference_time_us.size() ? _fbb.CreateVector(_o->inference_time_us) : 0;
  auto _max_memory_kb = _o->max_memory_kb;
  auto _ok = _o->ok;
  auto _metrics = _o->metrics.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::BenchmarkMetric>> (_o->metrics.size(), [](size_t i, _VectorArgs *__va) { return CreateBenchmarkMetric(*__va->__fbb, __va->__o->metrics[i].get(), __va->__rehasher); }, &_va ) : 0;
  return tflite::CreateBenchmarkResult(
      _fbb,
      _initialization_time_us,
      _inference_time_us,
      _max_memory_kb,
      _ok,
      _metrics);
}
// ---- Object-API helpers for ErrorCode (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
  return
      (lhs.source == rhs.source) &&
      (lhs.tflite_error == rhs.tflite_error) &&
      (lhs.underlying_api_error == rhs.underlying_api_error);
}
inline bool operator!=(const ErrorCodeT &lhs, const ErrorCodeT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline ErrorCodeT *ErrorCode::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new ErrorCodeT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o.
inline void ErrorCode::UnPackTo(ErrorCodeT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = source(); _o->source = _e; }
  { auto _e = tflite_error(); _o->tflite_error = _e; }
  { auto _e = underlying_api_error(); _o->underlying_api_error = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<ErrorCode> ErrorCode::Pack(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateErrorCode(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<ErrorCode> CreateErrorCode(flatbuffers::FlatBufferBuilder &_fbb, const ErrorCodeT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const ErrorCodeT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _source = _o->source;
  auto _tflite_error = _o->tflite_error;
  auto _underlying_api_error = _o->underlying_api_error;
  return tflite::CreateErrorCode(
      _fbb,
      _source,
      _tflite_error,
      _underlying_api_error);
}
// ---- Object-API helpers for BenchmarkError (flatc-generated) ----
// Field-wise equality of the unpacked native type.
inline bool operator==(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) {
  return
      (lhs.stage == rhs.stage) &&
      (lhs.exit_code == rhs.exit_code) &&
      (lhs.signal == rhs.signal) &&
      (lhs.error_code == rhs.error_code);
}
inline bool operator!=(const BenchmarkErrorT &lhs, const BenchmarkErrorT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline BenchmarkErrorT *BenchmarkError::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkErrorT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; `error_code` sub-tables are deep-unpacked.
inline void BenchmarkError::UnPackTo(BenchmarkErrorT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = stage(); _o->stage = _e; }
  { auto _e = exit_code(); _o->exit_code = _e; }
  { auto _e = signal(); _o->signal = _e; }
  { auto _e = error_code(); if (_e) { _o->error_code.resize(_e->size()); for (flatbuffers::uoffset_t _i = 0; _i < _e->size(); _i++) { _o->error_code[_i] = std::unique_ptr<tflite::ErrorCodeT>(_e->Get(_i)->UnPack(_resolver)); } } }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<BenchmarkError> BenchmarkError::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkError(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<BenchmarkError> CreateBenchmarkError(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkErrorT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkErrorT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  auto _stage = _o->stage;
  auto _exit_code = _o->exit_code;
  auto _signal = _o->signal;
  // An empty vector is written as offset 0 (field absent).
  auto _error_code = _o->error_code.size() ? _fbb.CreateVector<flatbuffers::Offset<tflite::ErrorCode>> (_o->error_code.size(), [](size_t i, _VectorArgs *__va) { return CreateErrorCode(*__va->__fbb, __va->__o->error_code[i].get(), __va->__rehasher); }, &_va ) : 0;
  return tflite::CreateBenchmarkError(
      _fbb,
      _stage,
      _exit_code,
      _signal,
      _error_code);
}
// ---- Object-API helpers for BenchmarkEvent (flatc-generated) ----
// Field-wise equality. unique_ptr sub-tables compare equal when both are
// null, both point at equal values, or one is null and the other holds a
// default-constructed value (absent == default).
inline bool operator==(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
  return
      ((!lhs.tflite_settings && !rhs.tflite_settings) || (lhs.tflite_settings && rhs.tflite_settings && *lhs.tflite_settings == *rhs.tflite_settings) || (lhs.tflite_settings && !rhs.tflite_settings && *lhs.tflite_settings == decltype(lhs.tflite_settings)::element_type()) || (rhs.tflite_settings && !lhs.tflite_settings && *rhs.tflite_settings == decltype(rhs.tflite_settings)::element_type())) &&
      (lhs.event_type == rhs.event_type) &&
      ((!lhs.result && !rhs.result) || (lhs.result && rhs.result && *lhs.result == *rhs.result) || (lhs.result && !rhs.result && *lhs.result == decltype(lhs.result)::element_type()) || (rhs.result && !lhs.result && *rhs.result == decltype(rhs.result)::element_type())) &&
      ((!lhs.error && !rhs.error) || (lhs.error && rhs.error && *lhs.error == *rhs.error) || (lhs.error && !rhs.error && *lhs.error == decltype(lhs.error)::element_type()) || (rhs.error && !lhs.error && *rhs.error == decltype(rhs.error)::element_type())) &&
      (lhs.boottime_us == rhs.boottime_us) &&
      (lhs.wallclock_us == rhs.wallclock_us);
}
inline bool operator!=(const BenchmarkEventT &lhs, const BenchmarkEventT &rhs) {
  return !(lhs == rhs);
}
// Unpacks this table into a freshly allocated native object; caller owns it.
inline BenchmarkEventT *BenchmarkEvent::UnPack(const flatbuffers::resolver_function_t *_resolver) const {
  auto _o = new BenchmarkEventT();
  UnPackTo(_o, _resolver);
  return _o;
}
// Copies each table field into *_o; sub-tables are recursively unpacked.
inline void BenchmarkEvent::UnPackTo(BenchmarkEventT *_o, const flatbuffers::resolver_function_t *_resolver) const {
  (void)_o;
  (void)_resolver;
  { auto _e = tflite_settings(); if (_e) _o->tflite_settings = std::unique_ptr<tflite::TFLiteSettingsT>(_e->UnPack(_resolver)); }
  { auto _e = event_type(); _o->event_type = _e; }
  { auto _e = result(); if (_e) _o->result = std::unique_ptr<tflite::BenchmarkResultT>(_e->UnPack(_resolver)); }
  { auto _e = error(); if (_e) _o->error = std::unique_ptr<tflite::BenchmarkErrorT>(_e->UnPack(_resolver)); }
  { auto _e = boottime_us(); _o->boottime_us = _e; }
  { auto _e = wallclock_us(); _o->wallclock_us = _e; }
}
// Serializes the native object back into the builder.
inline flatbuffers::Offset<BenchmarkEvent> BenchmarkEvent::Pack(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT* _o, const flatbuffers::rehasher_function_t *_rehasher) {
  return CreateBenchmarkEvent(_fbb, _o, _rehasher);
}
inline flatbuffers::Offset<BenchmarkEvent> CreateBenchmarkEvent(flatbuffers::FlatBufferBuilder &_fbb, const BenchmarkEventT *_o, const flatbuffers::rehasher_function_t *_rehasher) {
  (void)_rehasher;
  (void)_o;
  struct _VectorArgs { flatbuffers::FlatBufferBuilder *__fbb; const BenchmarkEventT* __o; const flatbuffers::rehasher_function_t *__rehasher; } _va = { &_fbb, _o, _rehasher}; (void)_va;
  // Null sub-objects are written as offset 0 (field absent in the table).
  auto _tflite_settings = _o->tflite_settings ? CreateTFLiteSettings(_fbb, _o->tflite_settings.get(), _rehasher) : 0;
  auto _event_type = _o->event_type;
  auto _result = _o->result ? CreateBenchmarkResult(_fbb, _o->result.get(), _rehasher) : 0;
  auto _error = _o->error ? CreateBenchmarkError(_fbb, _o->error.get(), _rehasher) : 0;
  auto _boottime_us = _o->boottime_us;
  auto _wallclock_us = _o->wallclock_us;
  return tflite::CreateBenchmarkEvent(
      _fbb,
      _tflite_settings,
      _event_type,
      _result,
      _error,
      _boottime_us,
      _wallclock_us);
}
} // namespace tflite
#endif // FLATBUFFERS_GENERATED_CONFIGURATION_TFLITE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h | C++ | apache-2.0 | 135,041 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"

#include <utility>

#include "absl/synchronization/mutex.h"
namespace tflite {
namespace delegates {
// Registers `creator_function` as the factory for delegate plugins named
// `name`. Thread-safe. A later registration under the same name replaces the
// earlier one.
void DelegatePluginRegistry::RegisterImpl(
    const std::string& name,
    std::function<
        std::unique_ptr<DelegatePluginInterface>(const TFLiteSettings&)>
        creator_function) {
  absl::MutexLock lock(&mutex_);
  // The parameter is taken by value, so move it into the map instead of
  // making a second copy of the std::function.
  factories_[name] = std::move(creator_function);
}
std::unique_ptr<DelegatePluginInterface> DelegatePluginRegistry::CreateImpl(
const std::string& name, const TFLiteSettings& settings) {
absl::MutexLock lock(&mutex_);
auto it = factories_.find(name);
return (it != factories_.end()) ? it->second(settings) : nullptr;
}
// Returns the process-wide registry. The instance is heap-allocated on first
// use and intentionally never destroyed, avoiding static-destruction-order
// problems at shutdown.
DelegatePluginRegistry* DelegatePluginRegistry::GetSingleton() {
  static auto* instance = new DelegatePluginRegistry();
  return instance;
}
// Convenience wrapper: looks up `name` in the global singleton registry and
// creates a plugin instance (nullptr if the name is unknown).
std::unique_ptr<DelegatePluginInterface> DelegatePluginRegistry::CreateByName(
    const std::string& name, const TFLiteSettings& settings) {
  return DelegatePluginRegistry::GetSingleton()->CreateImpl(name, settings);
}
// Registration helper, intended to be instantiated as a static object via
// TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION so registration happens during
// static initialization, before main().
DelegatePluginRegistry::Register::Register(const std::string& name,
                                           CreatorFunction creator_function) {
  auto* const registry = DelegatePluginRegistry::GetSingleton();
  // `creator_function` was received by value; move it onward to avoid an
  // extra copy of the std::function.
  registry->RegisterImpl(name, std::move(creator_function));
}
} // namespace delegates
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/delegate_registry.cc | C++ | apache-2.0 | 2,100 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_DELEGATE_REGISTRY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_DELEGATE_REGISTRY_H_
#include <functional>
#include <memory>
#include <string>
#include <unordered_map>

#include "absl/synchronization/mutex.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
// Defines an interface for TFLite delegate plugins.
//
// The acceleration library aims to support all TFLite delegates based on
// configuration expressed as data (flatbuffers). However, consumers tend to
// care about size and also use a subset of delegates. Hence we don't want to
// statically build against all delegates.
//
// This interface allows plugins to handle specific delegates.
//
// Goal of this interface is not to abstract away all the differences between
// delegates. The goal is only to avoid static linking.
//
// Note to implementers: this interface may change if new delegates don't fit
// into the same design.
namespace tflite {
namespace delegates {
// Same w/ Interpreter::TfLiteDelegatePtr to avoid pulling
// tensorflow/lite/interpreter.h dependency
using TfLiteDelegatePtr =
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>;
// Interface implemented by each delegate plugin. A concrete plugin is
// constructed from TFLiteSettings and knows how to create (and destroy,
// via the deleter in TfLiteDelegatePtr) its delegate.
class DelegatePluginInterface {
 public:
  // Creates a delegate instance. The returned smart pointer carries a
  // plugin-supplied deleter, so callers need not know the concrete type.
  virtual TfLiteDelegatePtr Create() = 0;
  // Returns a delegate-specific error number for `from_delegate`;
  // semantics are plugin-defined (0 conventionally means "no error").
  virtual int GetDelegateErrno(TfLiteDelegate* from_delegate) = 0;
  virtual ~DelegatePluginInterface() = default;
};
// A stripped-down registry that allows delegate plugins to be created by name.
//
// Limitations:
// - Doesn't allow deregistration.
// - Doesn't check for duplication registration.
//
class DelegatePluginRegistry {
 public:
  // Factory signature: builds a plugin from the (per-plugin) TFLiteSettings.
  typedef std::function<std::unique_ptr<DelegatePluginInterface>(
      const TFLiteSettings&)>
      CreatorFunction;
  // Returns a DelegatePluginInterface registered with `name` or nullptr if no
  // matching plugin found.
  // TFLiteSettings is per-plugin, so that the corresponding delegate options
  // data lifetime is maintained.
  static std::unique_ptr<DelegatePluginInterface> CreateByName(
      const std::string& name, const TFLiteSettings& settings);
  // Struct to be statically allocated for registration.
  struct Register {
    Register(const std::string& name, CreatorFunction creator_function);
  };
 private:
  // Adds `creator_function` under `name`; last registration wins.
  void RegisterImpl(const std::string& name, CreatorFunction creator_function);
  // Looks up `name` and invokes its factory; nullptr when unregistered.
  std::unique_ptr<DelegatePluginInterface> CreateImpl(
      const std::string& name, const TFLiteSettings& settings);
  // Process-wide instance; intentionally never destroyed.
  static DelegatePluginRegistry* GetSingleton();
  absl::Mutex mutex_;
  // Name -> factory map; all access is serialized by mutex_.
  std::unordered_map<std::string, CreatorFunction> factories_
      ABSL_GUARDED_BY(mutex_);
};
} // namespace delegates
} // namespace tflite
// Registers delegate plugin factory `f` under `name` by defining a static
// DelegatePluginRegistry::Register object. The registry type is fully
// qualified so the macro also works when invoked outside the
// tflite::delegates namespace (the unqualified form only compiled when the
// expansion happened inside that namespace).
#define TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION_VNAME(name, f) \
  static auto* g_delegate_plugin_##name##_ =                     \
      new ::tflite::delegates::DelegatePluginRegistry::Register(#name, f);
#define TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(name, f) \
  TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION_VNAME(name, f);
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_DELEGATE_REGISTRY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h | C++ | apache-2.0 | 3,805 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/gpu/delegate.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"
namespace tflite {
namespace delegates {
// Delegate plugin for the TFLite GPU delegate: translates the GPUSettings
// flatbuffer table into TfLiteGpuDelegateOptionsV2.
class GpuPlugin : public DelegatePluginInterface {
 public:
  // Creates a GPU delegate from the options captured at construction time.
  TfLiteDelegatePtr Create() override {
    return TfLiteDelegatePtr(TfLiteGpuDelegateV2Create(&options_),
                             TfLiteGpuDelegateV2Delete);
  }
  // The GPU delegate exposes no errno-style state; always reports 0.
  int GetDelegateErrno(TfLiteDelegate* from_delegate) override { return 0; }
  // Factory used for registration with DelegatePluginRegistry.
  static std::unique_ptr<DelegatePluginInterface> New(
      const TFLiteSettings& acceleration) {
    return absl::make_unique<GpuPlugin>(acceleration);
  }
  // Starts from the delegate's defaults, then applies any overrides found in
  // tflite_settings.gpu_settings(); with no gpu_settings the defaults stand.
  explicit GpuPlugin(const TFLiteSettings& tflite_settings)
      : options_(TfLiteGpuDelegateOptionsV2Default()) {
    const auto* gpu_settings = tflite_settings.gpu_settings();
    if (gpu_settings) {
      // Allowing precision loss trades accuracy for latency.
      options_.inference_priority1 =
          gpu_settings->is_precision_loss_allowed()
              ? TFLITE_GPU_INFERENCE_PRIORITY_MIN_LATENCY
              : TFLITE_GPU_INFERENCE_PRIORITY_MAX_PRECISION;
      if (gpu_settings->enable_quantized_inference()) {
        options_.experimental_flags |=
            TFLITE_GPU_EXPERIMENTAL_FLAGS_ENABLE_QUANT;
      }
      // Optionally pin the delegate to a single backend API.
      if (gpu_settings->force_backend() == GPUBackend_OPENCL) {
        options_.experimental_flags |= TFLITE_GPU_EXPERIMENTAL_FLAGS_CL_ONLY;
      } else if (gpu_settings->force_backend() == GPUBackend_OPENGL) {
        options_.experimental_flags |= TFLITE_GPU_EXPERIMENTAL_FLAGS_GL_ONLY;
      }
      // NOTE(review): negative values mean "keep the delegate default";
      // only non-negative settings are copied through.
      if (tflite_settings.max_delegated_partitions() >= 0) {
        options_.max_delegated_partitions =
            tflite_settings.max_delegated_partitions();
      }
    }
  }
 private:
  TfLiteGpuDelegateOptionsV2 options_;
};
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(GpuPlugin, GpuPlugin::New);
} // namespace delegates
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/gpu_plugin.cc | C++ | apache-2.0 | 2,679 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"
#if defined(__ARM_ARCH)
#include "tensorflow/lite/delegates/hexagon/hexagon_delegate.h"
#endif
namespace tflite {
namespace delegates {
// Delegate plugin for the Hexagon delegate. The delegate is only available
// on ARM builds; elsewhere Create() returns a null delegate and the options
// member is compiled out entirely.
class HexagonPlugin : public DelegatePluginInterface {
 public:
  TfLiteDelegatePtr Create() override {
#if defined(__ARM_ARCH)
    // The Hexagon runtime must be initialized before delegate creation and
    // torn down after the delegate is destroyed; the deleter pairs both.
    TfLiteHexagonInit();
    auto* delegate_ptr = TfLiteHexagonDelegateCreate(&options_);
    TfLiteDelegatePtr delegate(delegate_ptr, [](TfLiteDelegate* delegate) {
      TfLiteHexagonDelegateDelete(delegate);
      TfLiteHexagonTearDown();
    });
    return delegate;
#else   // !defined(__ARM_ARCH)
    // Hexagon unsupported on this architecture: null delegate, no-op deleter.
    return TfLiteDelegatePtr(nullptr, [](TfLiteDelegate*) {});
#endif  // defined(__ARM_ARCH)
  }
  // No errno-style state is exposed; always reports 0.
  int GetDelegateErrno(TfLiteDelegate* /* from_delegate */) override {
    return 0;
  }
  // Factory used for registration with DelegatePluginRegistry.
  static std::unique_ptr<HexagonPlugin> New(
      const TFLiteSettings& tflite_settings) {
    return absl::make_unique<HexagonPlugin>(tflite_settings);
  }
  // Copies fields from hexagon_settings() into the delegate options
  // (ARM builds only; on other targets the settings are ignored).
  explicit HexagonPlugin(const TFLiteSettings& tflite_settings) {
    const HexagonSettings* settings = tflite_settings.hexagon_settings();
#if defined(__ARM_ARCH)
    options_ = TfLiteHexagonDelegateOptions({0});
    if (settings) {
      options_.debug_level = settings->debug_level();
      options_.powersave_level = settings->powersave_level();
      options_.print_graph_profile = settings->print_graph_profile();
      options_.print_graph_debug = settings->print_graph_debug();
      // Negative means "keep the delegate default".
      if (tflite_settings.max_delegated_partitions() >= 0) {
        options_.max_delegated_partitions =
            tflite_settings.max_delegated_partitions();
      }
    }
#else
    (void)settings;
#endif
  }
 private:
#if defined(__ARM_ARCH)
  TfLiteHexagonDelegateOptions options_;
#endif
};
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(HexagonPlugin, HexagonPlugin::New);
} // namespace delegates
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/hexagon_plugin.cc | C++ | apache-2.0 | 2,727 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This file implements the TFLite Delegate Plugin for the NNAPI Delegate.
#include "tensorflow/lite/experimental/acceleration/configuration/nnapi_plugin.h"
namespace tflite {
namespace delegates {
// Statically registers the NNAPI plugin under the name "NnapiPlugin" so it
// can be instantiated via DelegatePluginRegistry::CreateByName.
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(NnapiPlugin, NnapiPlugin::New);
} // namespace delegates
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/nnapi_plugin.cc | C++ | apache-2.0 | 995 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_NNAPI_PLUGIN_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_NNAPI_PLUGIN_H_
// This file provides the NNApiPlugin class, which implements the
// TFLite Delegate Plugin for the NNAPI Delegate.
#include <memory>
#include <string>
#include "absl/memory/memory.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/experimental/acceleration/configuration/c/delegate_plugin.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"
namespace tflite {
namespace delegates {
// Delegate plugin for the NNAPI delegate: translates the NNAPISettings
// flatbuffer table into StatefulNnApiDelegate::Options. The plugin owns the
// std::string copies backing the raw char* option fields, so the plugin must
// outlive any delegate created from it.
class NnapiPlugin : public DelegatePluginInterface {
 public:
  // Creates a StatefulNnApiDelegate from the captured options. The custom
  // deleter downcasts before deleting so the derived destructor runs.
  TfLiteDelegatePtr Create() override {
    auto nnapi_delegate =
        absl::make_unique<tflite::StatefulNnApiDelegate>(options_);
    return TfLiteDelegatePtr(
        nnapi_delegate.release(), [](TfLiteDelegate* delegate) {
          delete static_cast<tflite::StatefulNnApiDelegate*>(delegate);
        });
  }
  // Reports the NNAPI error number recorded on the delegate.
  // NOTE(review): assumes `from_delegate` was created by this plugin; the
  // static_cast is unchecked.
  int GetDelegateErrno(TfLiteDelegate* from_delegate) override {
    auto nnapi_delegate =
        static_cast<tflite::StatefulNnApiDelegate*>(from_delegate);
    return nnapi_delegate->GetNnApiErrno();
  }
  // Factory used for registration with DelegatePluginRegistry.
  static std::unique_ptr<NnapiPlugin> New(
      const TFLiteSettings& tflite_settings) {
    return absl::make_unique<NnapiPlugin>(tflite_settings);
  }
  // Builds the delegate options from nnapi_settings(); with no settings the
  // default-constructed options stand.
  explicit NnapiPlugin(const TFLiteSettings& tflite_settings) {
    const NNAPISettings* nnapi_settings = tflite_settings.nnapi_settings();
    if (!nnapi_settings) return;
    // String options: copy into members first so the c_str() pointers stored
    // in options_ stay valid for the plugin's lifetime.
    if (nnapi_settings->accelerator_name() &&
        nnapi_settings->accelerator_name()->Length() != 0) {
      accelerator_ = nnapi_settings->accelerator_name()->str();
      options_.accelerator_name = accelerator_.c_str();
    }
    if (nnapi_settings->cache_directory() &&
        nnapi_settings->cache_directory()->Length() != 0) {
      cache_dir_ = nnapi_settings->cache_directory()->str();
      options_.cache_dir = cache_dir_.c_str();
    }
    if (nnapi_settings->model_token() &&
        nnapi_settings->model_token()->Length() != 0) {
      model_token_ = nnapi_settings->model_token()->str();
      options_.model_token = model_token_.c_str();
    }
    options_.execution_preference =
        ConvertExecutionPrefence(nnapi_settings->execution_preference());
    // Flatbuffer flag is "allow"; delegate option is the negation.
    options_.disallow_nnapi_cpu =
        !nnapi_settings->allow_nnapi_cpu_on_android_10_plus();
    options_.execution_priority =
        ConvertExecutionPriority(nnapi_settings->execution_priority());
    options_.allow_fp16 = nnapi_settings->allow_fp16_precision_for_fp32();
    options_.use_burst_computation = nnapi_settings->use_burst_computation();
    // Negative means "keep the delegate default".
    if (tflite_settings.max_delegated_partitions() >= 0) {
      options_.max_number_delegated_partitions =
          tflite_settings.max_delegated_partitions();
    }
  }
  // Read-only access to the assembled options (used by tests).
  const tflite::StatefulNnApiDelegate::Options& Options() { return options_; }
 private:
  // Maps the flatbuffer execution-preference enum onto the delegate's enum;
  // unknown values become kUndefined. (Name keeps the historical
  // "Prefence" typo; it is private so renaming is safe to defer.)
  static inline tflite::StatefulNnApiDelegate::Options::ExecutionPreference
  ConvertExecutionPrefence(
      NNAPIExecutionPreference from_compatibility_preference) {
    using TflitePreference =
        tflite::StatefulNnApiDelegate::Options::ExecutionPreference;
    switch (from_compatibility_preference) {
      case NNAPIExecutionPreference_NNAPI_LOW_POWER:
        return TflitePreference::kLowPower;
      case NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER:
        return TflitePreference::kFastSingleAnswer;
      case NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED:
        return TflitePreference::kSustainedSpeed;
      default:
        return TflitePreference::kUndefined;
    }
  }
  // Maps the flatbuffer priority enum onto ANEURALNETWORKS_PRIORITY_*
  // constants; unknown values become the NNAPI default priority.
  static inline int ConvertExecutionPriority(
      NNAPIExecutionPriority from_compatibility_priority) {
    switch (from_compatibility_priority) {
      case NNAPIExecutionPriority_NNAPI_PRIORITY_LOW:
        return ANEURALNETWORKS_PRIORITY_LOW;
      case NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM:
        return ANEURALNETWORKS_PRIORITY_MEDIUM;
      case NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH:
        return ANEURALNETWORKS_PRIORITY_HIGH;
      default:
        return ANEURALNETWORKS_PRIORITY_DEFAULT;
    }
  }
  // Backing storage for the char* fields inside options_.
  std::string accelerator_, cache_dir_, model_token_;
  tflite::StatefulNnApiDelegate::Options options_;
};
} // namespace delegates
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_NNAPI_PLUGIN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/nnapi_plugin.h | C++ | apache-2.0 | 5,203 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include <gtest/gtest.h>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_kernel.h"
#include "tensorflow/lite/delegates/nnapi/nnapi_delegate_mock_test.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
// Tests for checking that the NNAPI Delegate plugin correctly handles all the
// options from the flatbuffer.
//
// Checking done at NNAPI call level, as that is where we have a mockable
// layer.
namespace tflite {
namespace {
using delegate::nnapi::NnApiMock;
// Minimal model with one ADD op (input + constant), used to exercise
// delegation through the NNAPI plugin.
class SingleAddOpModel : tflite::SingleOpModel {
 public:
  void Build() {
    int input = AddInput({tflite::TensorType_FLOAT32, {1, 2, 2}});
    int constant = AddConstInput({tflite::TensorType_FLOAT32, {1, 2, 2}},
                                 {1.0f, 1.0f, 1.0f, 1.0f});
    AddOutput({tflite::TensorType_FLOAT32, {}});
    SetBuiltinOp(tflite::BuiltinOperator_ADD, tflite::BuiltinOptions_AddOptions,
                 tflite::CreateAddOptions(builder_).Union());
    // Set apply_delegate to false to skip applying TfLite default delegates.
    BuildInterpreter({GetShape(input), GetShape(constant)},
                     /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/false,
                     /*allocate_and_delegate=*/true);
  }
  tflite::Interpreter* Interpreter() const { return interpreter_.get(); }
};
// Fixture that replaces the NNAPI implementation with NnApiMock and creates
// the delegate through the plugin registry, so option propagation can be
// observed at the NNAPI call boundary.
class NNAPIPluginTest : public ::testing::Test {
 protected:
  NNAPIPluginTest() : delegate_(nullptr, [](TfLiteDelegate*) {}) {}
  void SetUp() override {
    nnapi_ = const_cast<NnApi*>(NnApiImplementation());
    nnapi_mock_ = absl::make_unique<NnApiMock>(nnapi_);
    // Report every op as supported so delegation always proceeds.
    nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
        [](const ANeuralNetworksModel* model,
           const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
           bool* supportedOps) -> int {
      supportedOps[0] = true;
      return 0;
    };
    model_.Build();
  }
  // Asserts that preference `input` arrives at NNAPI as `output`: the stub
  // returns preference - output, so any mismatch fails compilation setup.
  template <NNAPIExecutionPreference input, int output>
  void CheckExecutionPreference() {
    // Note - this uses a template since the NNAPI functions are C function
    // pointers rather than lambdas so can't capture variables.
    nnapi_->ANeuralNetworksCompilation_setPreference =
        [](ANeuralNetworksCompilation* compilation, int32_t preference) {
          return preference - output;
        };
    CreateDelegate(CreateNNAPISettings(fbb_, 0, 0, 0, input));
    // Since delegation succeeds, the model becomes immutable and hence can't
    // reuse it.
    SingleAddOpModel model;
    model.Build();
    EXPECT_EQ(model.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
              kTfLiteOk)
        << " given input: " << input << " expected output: " << output;
  }
  // Same trick as above for execution priority.
  template <NNAPIExecutionPriority input, int output>
  void CheckExecutionPriority() {
    // Note - this uses a template since the NNAPI functions are C function
    // pointers rather than lambdas so can't capture variables.
    nnapi_->ANeuralNetworksCompilation_setPriority =
        [](ANeuralNetworksCompilation* compilation, int32_t priority) {
          return priority - output;
        };
    CreateDelegate(CreateNNAPISettings(fbb_, 0, 0, 0,
                                       NNAPIExecutionPreference_UNDEFINED, 0, 0,
                                       /*allow CPU=*/true, input));
    // Since delegation succeeds, the model becomes immutable and hence can't
    // reuse it.
    SingleAddOpModel model;
    model.Build();
    EXPECT_EQ(model.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
              kTfLiteOk)
        << " given input: " << input << " expected output: " << output;
  }
  // Wraps `settings` into a TFLiteSettings table and creates the delegate
  // via the plugin registry, mirroring production usage.
  void CreateDelegate(flatbuffers::Offset<NNAPISettings> settings) {
    settings_ = flatbuffers::GetTemporaryPointer(
        fbb_, CreateTFLiteSettings(fbb_, tflite::Delegate_NNAPI, settings));
    plugin_ = delegates::DelegatePluginRegistry::CreateByName("NnapiPlugin",
                                                              *settings_);
    delegate_ = plugin_->Create();
  }
  NnApi* nnapi_;
  std::unique_ptr<NnApiMock> nnapi_mock_;
  SingleAddOpModel model_;
  flatbuffers::FlatBufferBuilder fbb_;
  const TFLiteSettings* settings_ = nullptr;
  delegates::TfLiteDelegatePtr delegate_;
  std::unique_ptr<delegates::DelegatePluginInterface> plugin_;
};
// Verifies the accelerator name from NNAPISettings reaches NNAPI device
// selection: an unknown device fails delegation, the mock's device succeeds.
TEST_F(NNAPIPluginTest, PassesAcceleratorName) {
  // Fails with non-existent "foo".
  CreateDelegate(CreateNNAPISettings(fbb_, fbb_.CreateString("foo")));
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteDelegateError);
  // Succeeds with "test-device" supported by the mock.
  CreateDelegate(CreateNNAPISettings(fbb_, fbb_.CreateString("test-device")));
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteOk);
}
// Checks every flatbuffer execution-preference value maps to the expected
// delegate option constant.
TEST_F(NNAPIPluginTest, PassesExecutionPreference) {
  CheckExecutionPreference<NNAPIExecutionPreference_UNDEFINED,
                           StatefulNnApiDelegate::Options::kUndefined>();
  CheckExecutionPreference<NNAPIExecutionPreference_NNAPI_LOW_POWER,
                           StatefulNnApiDelegate::Options::kLowPower>();
  CheckExecutionPreference<NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER,
                           StatefulNnApiDelegate::Options::kFastSingleAnswer>();
  CheckExecutionPreference<NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED,
                           StatefulNnApiDelegate::Options::kSustainedSpeed>();
}
// Checks every flatbuffer priority value maps to the expected
// ANEURALNETWORKS_PRIORITY_* constant (requires NNAPI 1.3+).
TEST_F(NNAPIPluginTest, PassesExecutionPriority) {
  nnapi_->android_sdk_version =
      tflite::delegate::nnapi::kMinSdkVersionForNNAPI13;
  CheckExecutionPriority<NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
                         ANEURALNETWORKS_PRIORITY_DEFAULT>();
  CheckExecutionPriority<NNAPIExecutionPriority_NNAPI_PRIORITY_LOW,
                         ANEURALNETWORKS_PRIORITY_LOW>();
  CheckExecutionPriority<NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM,
                         ANEURALNETWORKS_PRIORITY_MEDIUM>();
  CheckExecutionPriority<NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH,
                         ANEURALNETWORKS_PRIORITY_HIGH>();
}
// Verifies cache directory and model token are forwarded to
// ANeuralNetworksCompilation_setCaching.
TEST_F(NNAPIPluginTest, PassesCachingParameters) {
  nnapi_->ANeuralNetworksCompilation_setCaching =
      [](ANeuralNetworksCompilation* compilation, const char* cacheDir,
         const uint8_t* token) -> int {
    if (std::string(cacheDir) != "d") return 1;
    // Token is hashed with other bits, just check that it's not empty.
    if (std::string(reinterpret_cast<const char*>(token)).empty()) return 2;
    return 0;
  };
  CreateDelegate(CreateNNAPISettings(fbb_, 0, fbb_.CreateString("d"),
                                     fbb_.CreateString("t")));
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteOk);
}
// With CPU disallowed, exactly one device should be offered to NNAPI.
TEST_F(NNAPIPluginTest, PassesFalseNNAPICpuFlag) {
  CreateDelegate(CreateNNAPISettings(fbb_, 0, 0, 0,
                                     NNAPIExecutionPreference_UNDEFINED, 0, 0,
                                     /* allow CPU */ false));
  nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
      [](const ANeuralNetworksModel* model,
         const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
         bool* supportedOps) -> int {
    supportedOps[0] = true;
    // Since no CPU, should only pass one device.
    return numDevices - 1;
  };
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteOk);
}
// With CPU allowed, two devices (accelerator + nnapi-reference) should be
// offered to NNAPI.
TEST_F(NNAPIPluginTest, PassesTrueNNAPICpuFlag) {
  CreateDelegate(CreateNNAPISettings(fbb_, 0, 0, 0,
                                     NNAPIExecutionPreference_UNDEFINED, 0, 0,
                                     /* allow CPU */ true));
  nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
      [](const ANeuralNetworksModel* model,
         const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
         bool* supportedOps) -> int {
    supportedOps[0] = true;
    // With CPU allowed, should pass two devices.
    return numDevices - 2;
  };
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteOk);
}
/*
* Building a model with three operations that can be used to create multiple
* delegated partitions.
*
* input1 ---
* | - ADD -- ROUND --
* | | - ADD -- output1
* input2 --- |
* |
* input3 -----------------------
*/
// Three-op model (ADD -> ROUND -> ADD) whose middle op can be rejected by the
// mock to produce two delegatable partitions; see the diagram above.
class MultiplePartitionsModel : tflite::MultiOpModel {
 public:
  void Build() {
    const tflite::TensorData tensors_data = {tflite::TensorType_FLOAT32,
                                             {1, 2, 2}};
    int input1 = AddInput(tensors_data);
    int input2 = AddInput(tensors_data);
    int input3 = AddInput(tensors_data);
    int add_out = AddInnerTensor<float>(tensors_data);
    int round_out = AddInnerTensor<float>(tensors_data);
    int output = AddOutput(tensors_data);
    AddBuiltinOp(
        tflite::BuiltinOperator_ADD, tflite::BuiltinOptions_AddOptions,
        CreateAddOptions(builder_, ActivationFunctionType_NONE).Union(),
        {input1, input2}, {add_out});
    AddBuiltinOp(tflite::BuiltinOperator_ROUND, tflite::BuiltinOptions_NONE,
                 /*builtin_options=*/0, {add_out}, {round_out});
    AddBuiltinOp(
        tflite::BuiltinOperator_ADD, tflite::BuiltinOptions_AddOptions,
        CreateAddOptions(builder_, ActivationFunctionType_NONE).Union(),
        {round_out, input3}, {output});
    // Set apply_delegate to false to skip applying TfLite default delegates.
    BuildInterpreter({GetShape(input1), GetShape(input2), GetShape(input3)},
                     /*num_threads=*/-1,
                     /*allow_fp32_relax_to_fp16=*/false,
                     /*apply_delegate=*/false,
                     /*allocate_and_delegate=*/true);
  }
  tflite::Interpreter* Interpreter() const { return interpreter_.get(); }
};
// Fixture for multi-partition tests; like NNAPIPluginTest but uses the
// three-op model and exposes max_delegated_partitions in CreateDelegate.
class NNAPIMultiOpPluginTest : public ::testing::Test {
 protected:
  NNAPIMultiOpPluginTest() : delegate_(nullptr, [](TfLiteDelegate*) {}) {}
  void SetUp() override {
    nnapi_ = const_cast<NnApi*>(NnApiImplementation());
    nnapi_mock_ = absl::make_unique<NnApiMock>(nnapi_);
    model_.Build();
  }
  // Builds TFLiteSettings with the given partition cap and creates the
  // delegate through the plugin registry.
  void CreateDelegate(flatbuffers::Offset<NNAPISettings> settings,
                      int max_delegated_partitions) {
    settings_ = flatbuffers::GetTemporaryPointer(
        fbb_,
        CreateTFLiteSettings(fbb_, tflite::Delegate_NNAPI, settings,
                             /* gpu_settings */ 0,
                             /* hexagon_settings */ 0,
                             /* xnnpack_settings */ 0,
                             /* cpu_settings */ 0, max_delegated_partitions));
    plugin_ = delegates::DelegatePluginRegistry::CreateByName("NnapiPlugin",
                                                              *settings_);
    delegate_ = plugin_->Create();
  }
  // Counts execution-plan nodes that ended up delegated.
  int CountNnApiPartitions() {
    return std::count_if(std::begin(model_.Interpreter()->execution_plan()),
                         std::end(model_.Interpreter()->execution_plan()),
                         [this](const int node_index) {
                           return model_.Interpreter()
                                      ->node_and_registration(node_index)
                                      ->first.delegate != nullptr;
                         });
  }
  NnApi* nnapi_;
  std::unique_ptr<NnApiMock> nnapi_mock_;
  MultiplePartitionsModel model_;
  flatbuffers::FlatBufferBuilder fbb_;
  const TFLiteSettings* settings_ = nullptr;
  delegates::TfLiteDelegatePtr delegate_;
  std::unique_ptr<delegates::DelegatePluginInterface> plugin_;
};
// With two supportable partitions but max_delegated_partitions == 1, only
// one partition should actually be delegated.
TEST_F(NNAPIMultiOpPluginTest, PassesMaxDelegatedPartitionsFlag) {
  CreateDelegate(CreateNNAPISettings(
                     fbb_, 0, 0, 0, NNAPIExecutionPreference_UNDEFINED, 0, 0,
                     /* allow CPU */ true,
                     /* execution_priority */
                     tflite::NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED,
                     /* allow_dynamic_dimensions */ false,
                     /* allow_fp16_precision_for_fp32 */ false),
                 /* max_delegated_partitions */ 1);
  // Middle op (ROUND) unsupported -> two candidate partitions.
  nnapi_->ANeuralNetworksModel_getSupportedOperationsForDevices =
      [](const ANeuralNetworksModel* model,
         const ANeuralNetworksDevice* const* devices, uint32_t numDevices,
         bool* supportedOps) -> int {
    supportedOps[0] = true;
    supportedOps[1] = false;
    supportedOps[2] = true;
    return 0;
  };
  EXPECT_EQ(model_.Interpreter()->ModifyGraphWithDelegate(delegate_.get()),
            kTfLiteOk);
  EXPECT_EQ(CountNnApiPartitions(), 1);
}
} // namespace
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/nnapi_plugin_test.cc | C++ | apache-2.0 | 13,814 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/acceleration/configuration/proto_to_flatbuffer.h"
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/minimal_logging.h"
namespace tflite {
using ::flatbuffers::FlatBufferBuilder;
using ::flatbuffers::Offset;
using ::flatbuffers::String;
using ::flatbuffers::Vector;
// Maps the proto ExecutionPreference enum to its flatbuffer counterpart.
// The switch has no default so -Wswitch flags newly added proto values;
// out-of-range input falls through to a logged error and ANY.
ExecutionPreference ConvertExecutionPreference(
    proto::ExecutionPreference preference) {
  switch (preference) {
    case proto::ExecutionPreference::ANY:
      return ExecutionPreference_ANY;
    case proto::ExecutionPreference::LOW_LATENCY:
      return ExecutionPreference_LOW_LATENCY;
    case proto::ExecutionPreference::LOW_POWER:
      return ExecutionPreference_LOW_POWER;
    case proto::ExecutionPreference::FORCE_CPU:
      return ExecutionPreference_FORCE_CPU;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                  "Unexpected value for ExecutionPreference: %d", preference);
  return ExecutionPreference_ANY;
}
// Maps the proto Delegate enum to its flatbuffer counterpart; unknown
// values are logged and mapped to NONE.
Delegate ConvertDelegate(proto::Delegate delegate) {
  switch (delegate) {
    case proto::Delegate::NONE:
      return Delegate_NONE;
    case proto::Delegate::NNAPI:
      return Delegate_NNAPI;
    case proto::Delegate::GPU:
      return Delegate_GPU;
    case proto::Delegate::HEXAGON:
      return Delegate_HEXAGON;
    case proto::Delegate::XNNPACK:
      return Delegate_XNNPACK;
    case proto::Delegate::EDGETPU:
      return Delegate_EDGETPU;
    case proto::Delegate::EDGETPU_CORAL:
      return Delegate_EDGETPU_CORAL;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for Delegate: %d",
                  delegate);
  return Delegate_NONE;
}
// Maps the proto NNAPIExecutionPreference enum to its flatbuffer
// counterpart; unknown values are logged and mapped to UNDEFINED.
NNAPIExecutionPreference ConvertNNAPIExecutionPreference(
    proto::NNAPIExecutionPreference preference) {
  switch (preference) {
    case proto::NNAPIExecutionPreference::UNDEFINED:
      return NNAPIExecutionPreference_UNDEFINED;
    case proto::NNAPIExecutionPreference::NNAPI_LOW_POWER:
      return NNAPIExecutionPreference_NNAPI_LOW_POWER;
    case proto::NNAPIExecutionPreference::NNAPI_FAST_SINGLE_ANSWER:
      return NNAPIExecutionPreference_NNAPI_FAST_SINGLE_ANSWER;
    case proto::NNAPIExecutionPreference::NNAPI_SUSTAINED_SPEED:
      return NNAPIExecutionPreference_NNAPI_SUSTAINED_SPEED;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                  "Unexpected value for NNAPIExecutionPreference: %d",
                  preference);
  return NNAPIExecutionPreference_UNDEFINED;
}
// Maps the proto NNAPIExecutionPriority enum to its flatbuffer
// counterpart; unknown values are logged and mapped to UNDEFINED.
NNAPIExecutionPriority ConvertNNAPIExecutionPriority(
    proto::NNAPIExecutionPriority priority) {
  switch (priority) {
    case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_UNDEFINED:
      return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
    case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_LOW:
      return NNAPIExecutionPriority_NNAPI_PRIORITY_LOW;
    case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_MEDIUM:
      return NNAPIExecutionPriority_NNAPI_PRIORITY_MEDIUM;
    case proto::NNAPIExecutionPriority::NNAPI_PRIORITY_HIGH:
      return NNAPIExecutionPriority_NNAPI_PRIORITY_HIGH;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                  "Unexpected value for NNAPIExecutionPriority: %d", priority);
  return NNAPIExecutionPriority_NNAPI_PRIORITY_UNDEFINED;
}
// Maps the proto GPUBackend enum to its flatbuffer counterpart; unknown
// values are logged and mapped to UNSET.
GPUBackend ConvertGPUBackend(proto::GPUBackend backend) {
  switch (backend) {
    case proto::GPUBackend::UNSET:
      return GPUBackend_UNSET;
    case proto::GPUBackend::OPENCL:
      return GPUBackend_OPENCL;
    case proto::GPUBackend::OPENGL:
      return GPUBackend_OPENGL;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR, "Unexpected value for GPUBackend: %d",
                  backend);
  return GPUBackend_UNSET;
}
// Maps the proto EdgeTpuPowerState enum to its flatbuffer counterpart;
// unknown values are logged and mapped to UNDEFINED_POWERSTATE.
EdgeTpuPowerState ConvertEdgeTpuPowerState(proto::EdgeTpuPowerState state) {
  switch (state) {
    case proto::EdgeTpuPowerState::UNDEFINED_POWERSTATE:
      return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
    case proto::EdgeTpuPowerState::TPU_CORE_OFF:
      return EdgeTpuPowerState_TPU_CORE_OFF;
    case proto::EdgeTpuPowerState::READY:
      return EdgeTpuPowerState_READY;
    case proto::EdgeTpuPowerState::ACTIVE_MIN_POWER:
      return EdgeTpuPowerState_ACTIVE_MIN_POWER;
    case proto::EdgeTpuPowerState::ACTIVE_VERY_LOW_POWER:
      return EdgeTpuPowerState_ACTIVE_VERY_LOW_POWER;
    case proto::EdgeTpuPowerState::ACTIVE_LOW_POWER:
      return EdgeTpuPowerState_ACTIVE_LOW_POWER;
    case proto::EdgeTpuPowerState::ACTIVE:
      return EdgeTpuPowerState_ACTIVE;
    case proto::EdgeTpuPowerState::OVER_DRIVE:
      return EdgeTpuPowerState_OVER_DRIVE;
  }
  TFLITE_LOG_PROD(TFLITE_LOG_ERROR,
                  "Unexpected value for EdgeTpuSettings::PowerState: %d",
                  state);
  return EdgeTpuPowerState_UNDEFINED_POWERSTATE;
}
// Serializes a proto FallbackSettings message into an equivalent
// flatbuffer table inside `builder`.
Offset<FallbackSettings> ConvertFallbackSettings(
    const proto::FallbackSettings& settings, FlatBufferBuilder* builder) {
  return CreateFallbackSettings(
      *builder, /*allow_automatic_fallback_on_compilation_error=*/
      settings.allow_automatic_fallback_on_compilation_error(),
      /*allow_automatic_fallback_on_execution_error=*/
      settings.allow_automatic_fallback_on_execution_error());
}
// Translates proto NNAPISettings into the equivalent flatbuffer table.
// Nested strings and sub-tables are serialized into `builder` before the
// enclosing NNAPISettings table is created.
Offset<NNAPISettings> ConvertNNAPISettings(const proto::NNAPISettings& settings,
                                           FlatBufferBuilder* builder) {
  const auto accelerator_name =
      builder->CreateString(settings.accelerator_name());
  const auto cache_directory =
      builder->CreateString(settings.cache_directory());
  const auto model_token = builder->CreateString(settings.model_token());
  const auto fallback_settings =
      ConvertFallbackSettings(settings.fallback_settings(), builder);
  return CreateNNAPISettings(
      *builder, accelerator_name, cache_directory, model_token,
      ConvertNNAPIExecutionPreference(settings.execution_preference()),
      settings.no_of_nnapi_instances_to_cache(), fallback_settings,
      settings.allow_nnapi_cpu_on_android_10_plus(),
      ConvertNNAPIExecutionPriority(settings.execution_priority()),
      settings.allow_dynamic_dimensions(),
      settings.allow_fp16_precision_for_fp32());
}
// Translates proto GPUSettings into the equivalent flatbuffer table.
Offset<GPUSettings> ConvertGPUSettings(const proto::GPUSettings& settings,
                                       FlatBufferBuilder* builder) {
  const bool precision_loss_allowed = settings.is_precision_loss_allowed();
  const bool quantized_inference = settings.enable_quantized_inference();
  const GPUBackend backend = ConvertGPUBackend(settings.force_backend());
  return CreateGPUSettings(*builder, precision_loss_allowed,
                           quantized_inference, backend);
}
// Translates proto HexagonSettings into the equivalent flatbuffer table.
Offset<HexagonSettings> ConvertHexagonSettings(
    const proto::HexagonSettings& settings, FlatBufferBuilder* builder) {
  const int debug_level = settings.debug_level();
  const int powersave_level = settings.powersave_level();
  const bool print_graph_profile = settings.print_graph_profile();
  const bool print_graph_debug = settings.print_graph_debug();
  return CreateHexagonSettings(*builder, debug_level, powersave_level,
                               print_graph_profile, print_graph_debug);
}
// Translates proto XNNPackSettings into the equivalent flatbuffer table.
Offset<XNNPackSettings> ConvertXNNPackSettings(
    const proto::XNNPackSettings& settings, FlatBufferBuilder* builder) {
  const int num_threads = settings.num_threads();
  return CreateXNNPackSettings(*builder, num_threads);
}
// Translates proto CPUSettings into the equivalent flatbuffer table.
Offset<CPUSettings> ConvertCPUSettings(const proto::CPUSettings& settings,
                                       FlatBufferBuilder* builder) {
  const int num_threads = settings.num_threads();
  return CreateCPUSettings(*builder, num_threads);
}
// Serializes a proto EdgeTpuDeviceSpec into its flatbuffer representation.
Offset<tflite::EdgeTpuDeviceSpec> ConvertEdgeTpuDeviceSpec(
    FlatBufferBuilder* builder, const proto::EdgeTpuDeviceSpec& device_spec) {
  // An offset of 0 denotes an absent vector in the resulting table.
  Offset<Vector<Offset<String>>> paths_vector = 0;
  if (device_spec.device_paths_size() > 0) {
    std::vector<Offset<String>> path_offsets;
    path_offsets.reserve(device_spec.device_paths_size());
    for (const auto& path : device_spec.device_paths()) {
      path_offsets.push_back(builder->CreateString(path));
    }
    paths_vector = builder->CreateVector(path_offsets);
  }
  return tflite::CreateEdgeTpuDeviceSpec(
      *builder,
      static_cast<tflite::EdgeTpuDeviceSpec_::PlatformType>(
          device_spec.platform_type()),
      device_spec.num_chips(), paths_vector, device_spec.chip_family());
}
// Translates proto EdgeTpuSettings into the equivalent flatbuffer table.
// All nested tables/strings are serialized into `builder` before the
// enclosing EdgeTpuSettings table is created.
Offset<EdgeTpuSettings> ConvertEdgeTpuSettings(
    const proto::EdgeTpuSettings& settings, FlatBufferBuilder* builder) {
  // An offset of 0 marks the vector as absent from the resulting table.
  Offset<Vector<Offset<tflite::EdgeTpuInactivePowerConfig>>>
      inactive_power_configs = 0;
  // Uses std vector to first construct the list and creates the flatbuffer
  // offset from it later.
  std::vector<Offset<tflite::EdgeTpuInactivePowerConfig>>
      inactive_power_configs_std;
  if (settings.inactive_power_configs_size() > 0) {
    for (const auto& config : settings.inactive_power_configs()) {
      inactive_power_configs_std.push_back(
          tflite::CreateEdgeTpuInactivePowerConfig(
              *builder,
              static_cast<tflite::EdgeTpuPowerState>(
                  config.inactive_power_state()),
              config.inactive_timeout_us()));
    }
    inactive_power_configs =
        builder->CreateVector<Offset<tflite::EdgeTpuInactivePowerConfig>>(
            inactive_power_configs_std);
  }
  // Optional proto fields: only serialized when present in the message.
  Offset<tflite::EdgeTpuDeviceSpec> edgetpu_device_spec = 0;
  if (settings.has_edgetpu_device_spec()) {
    edgetpu_device_spec =
        ConvertEdgeTpuDeviceSpec(builder, settings.edgetpu_device_spec());
  }
  Offset<String> model_token = 0;
  if (settings.has_model_token()) {
    model_token = builder->CreateString(settings.model_token());
  }
  return CreateEdgeTpuSettings(
      *builder, ConvertEdgeTpuPowerState(settings.inference_power_state()),
      inactive_power_configs, settings.inference_priority(),
      edgetpu_device_spec, model_token);
}
// Translates proto CoralSettings into the equivalent flatbuffer table.
Offset<CoralSettings> ConvertCoralSettings(const proto::CoralSettings& settings,
                                           FlatBufferBuilder* builder) {
  const auto device = builder->CreateString(settings.device());
  const auto performance =
      static_cast<tflite::CoralSettings_::Performance>(settings.performance());
  return CreateCoralSettings(*builder, device, performance,
                             settings.usb_always_dfu(),
                             settings.usb_max_bulk_in_queue_length());
}
// Translates the top-level proto TFLiteSettings, converting every nested
// delegate-specific settings message along the way.
Offset<TFLiteSettings> ConvertTfliteSettings(
    const proto::TFLiteSettings& settings, FlatBufferBuilder* builder) {
  const auto nnapi = ConvertNNAPISettings(settings.nnapi_settings(), builder);
  const auto gpu = ConvertGPUSettings(settings.gpu_settings(), builder);
  const auto hexagon =
      ConvertHexagonSettings(settings.hexagon_settings(), builder);
  const auto xnnpack =
      ConvertXNNPackSettings(settings.xnnpack_settings(), builder);
  const auto cpu = ConvertCPUSettings(settings.cpu_settings(), builder);
  const auto edgetpu =
      ConvertEdgeTpuSettings(settings.edgetpu_settings(), builder);
  const auto coral = ConvertCoralSettings(settings.coral_settings(), builder);
  const auto fallback =
      ConvertFallbackSettings(settings.fallback_settings(), builder);
  return CreateTFLiteSettings(*builder, ConvertDelegate(settings.delegate()),
                              nnapi, gpu, hexagon, xnnpack, cpu,
                              settings.max_delegated_partitions(), edgetpu,
                              coral, fallback);
}
// Converts the provided ComputeSettings from proto to flatbuffer format.
// The returned pointer refers to memory inside `builder`'s buffer; it is a
// temporary pointer in the flatbuffers sense — further use of the builder
// may invalidate it.
const ComputeSettings* ConvertFromProto(
    const proto::ComputeSettings& proto_settings, FlatBufferBuilder* builder) {
  const auto tflite_settings =
      ConvertTfliteSettings(proto_settings.tflite_settings(), builder);
  const auto model_namespace =
      builder->CreateString(proto_settings.model_namespace_for_statistics());
  const auto model_identifier =
      builder->CreateString(proto_settings.model_identifier_for_statistics());
  const auto offset = CreateComputeSettings(
      *builder, ConvertExecutionPreference(proto_settings.preference()),
      tflite_settings, model_namespace, model_identifier);
  return flatbuffers::GetTemporaryPointer(*builder, offset);
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/proto_to_flatbuffer.cc | C++ | apache-2.0 | 12,279 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_PROTO_TO_FLATBUFFER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_PROTO_TO_FLATBUFFER_H_
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/experimental/acceleration/configuration/configuration.pb.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
namespace tflite {
// Converts the provided ComputeSettings from proto to flatbuffer format.
const ComputeSettings* ConvertFromProto(
const proto::ComputeSettings& proto_settings,
flatbuffers::FlatBufferBuilder* builder);
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_ACCELERATION_CONFIGURATION_PROTO_TO_FLATBUFFER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/proto_to_flatbuffer.h | C++ | apache-2.0 | 1,429 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include "absl/memory/memory.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"
#include "tensorflow/lite/experimental/acceleration/configuration/configuration_generated.h"
#include "tensorflow/lite/experimental/acceleration/configuration/delegate_registry.h"
namespace tflite {
namespace delegates {
// Delegate plugin wrapping the XNNPack delegate: builds delegate options
// from the TFLiteSettings flatbuffer and produces delegate instances.
class XNNPackPlugin : public DelegatePluginInterface {
 public:
  // Starts from the delegate's defaults and overrides num_threads when
  // xnnpack_settings is present in the flatbuffer.
  explicit XNNPackPlugin(const TFLiteSettings& tflite_settings)
      : options_(TfLiteXNNPackDelegateOptionsDefault()) {
    const auto* xnnpack_settings = tflite_settings.xnnpack_settings();
    if (xnnpack_settings != nullptr) {
      options_.num_threads = xnnpack_settings->num_threads();
    }
  }
  // Factory used by the delegate registry.
  static std::unique_ptr<DelegatePluginInterface> New(
      const TFLiteSettings& acceleration) {
    return absl::make_unique<XNNPackPlugin>(acceleration);
  }
  // Creates a delegate instance; the returned smart pointer deletes it via
  // TfLiteXNNPackDelegateDelete.
  TfLiteDelegatePtr Create() override {
    return TfLiteDelegatePtr(TfLiteXNNPackDelegateCreate(&options_),
                             TfLiteXNNPackDelegateDelete);
  }
  // XNNPack exposes no delegate-specific error code.
  int GetDelegateErrno(TfLiteDelegate* from_delegate) override { return 0; }

 private:
  TfLiteXNNPackDelegateOptions options_;
};
TFLITE_REGISTER_DELEGATE_FACTORY_FUNCTION(XNNPackPlugin, XNNPackPlugin::New);
} // namespace delegates
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/acceleration/configuration/xnnpack_plugin.cc | C++ | apache-2.0 | 1,958 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
using System;
using System.Collections;
using System.Collections.Generic;
using System.Linq;
using TensorFlowLite;
using UnityEngine;
using UnityEngine.UI;
/// <summary>
/// Simple example demonstrating use of the experimental C# bindings for TensorFlowLite.
/// </summary>
/// <summary>
/// Simple example demonstrating use of the experimental C# bindings for TensorFlowLite.
/// </summary>
public class HelloTFLite : MonoBehaviour {

  [Tooltip("Configurable TFLite model.")]
  public TextAsset model;

  [Tooltip("Configurable TFLite input tensor data.")]
  public float[] inputs;

  [Tooltip("Target Text widget for display of inference execution.")]
  public Text inferenceText;

  // Interpreter backing the demo; created in Start(), released in OnDestroy().
  private Interpreter interpreter;

  // Output buffer, lazily (re)allocated in Update() to match the input length.
  private float[] outputs;

  void Awake() {
    // As the demo is extremely simple, there's no need to run at full frame-rate.
    QualitySettings.vSyncCount = 0;
    Application.targetFrameRate = 5;
  }

  void Start () {
    // Fixed typo in the log message ("Verion" -> "Version").
    Debug.LogFormat("TensorFlow Lite Version: {0}", Interpreter.GetVersion());
    var options = new Interpreter.Options() {
      threads = 2,
    };
    interpreter = new Interpreter(model.bytes, options);

    int inputCount = interpreter.GetInputTensorCount();
    int outputCount = interpreter.GetOutputTensorCount();
    for (int i = 0; i < inputCount; i++) {
      Debug.LogFormat("Input {0}: {1}", i, interpreter.GetInputTensorInfo(i));
    }
    // Fixed: this loop previously iterated over inputCount, so output tensor
    // info was logged incorrectly whenever the counts differed.
    for (int i = 0; i < outputCount; i++) {
      Debug.LogFormat("Output {0}: {1}", i, interpreter.GetOutputTensorInfo(i));
    }
  }

  void Update () {
    if (inputs == null) {
      return;
    }

    // (Re)allocate the output buffer and resize the input tensor whenever the
    // configured input length changes.
    if (outputs == null || outputs.Length != inputs.Length) {
      interpreter.ResizeInputTensor(0, new int[]{inputs.Length});
      interpreter.AllocateTensors();
      outputs = new float[inputs.Length];
    }

    float startTimeSeconds = Time.realtimeSinceStartup;
    interpreter.SetInputTensorData(0, inputs);
    interpreter.Invoke();
    interpreter.GetOutputTensorData(0, outputs);
    float inferenceTimeSeconds = Time.realtimeSinceStartup - startTimeSeconds;

    inferenceText.text = string.Format(
        "Inference took {0:0.0000} ms\nInput(s): {1}\nOutput(s): {2}",
        inferenceTimeSeconds * 1000.0,
        ArrayToString(inputs),
        ArrayToString(outputs));
  }

  void OnDestroy() {
    // Guard against Start() having failed before the interpreter was created.
    if (interpreter != null) {
      interpreter.Dispose();
    }
  }

  // Renders a float array as a comma-separated string for display.
  private static string ArrayToString(float[] values) {
    return string.Join(",", values.Select(x => x.ToString()).ToArray());
  }
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/examples/unity/TensorFlowLitePlugin/Assets/TensorFlowLite/Examples/HelloTFLite/Scripts/HelloTFLite.cs | C# | apache-2.0 | 3,029 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
using System;
using System.Runtime.InteropServices;
using System.Linq;
using TfLiteInterpreter = System.IntPtr;
using TfLiteInterpreterOptions = System.IntPtr;
using TfLiteModel = System.IntPtr;
using TfLiteTensor = System.IntPtr;
namespace TensorFlowLite
{
/// <summary>
/// Simple C# bindings for the experimental TensorFlowLite C API.
/// </summary>
/// <summary>
/// Simple C# bindings for the experimental TensorFlowLite C API.
/// </summary>
public class Interpreter : IDisposable
{
  public struct Options: IEquatable<Options> {
    /// <summary>
    /// The number of CPU threads to use for the interpreter.
    /// </summary>
    public int threads;

    public bool Equals(Options other) {
      return threads == other.threads;
    }
  }

  public struct TensorInfo {
    public string name { get; internal set; }
    public DataType type { get; internal set; }
    public int[] dimensions { get; internal set; }
    public QuantizationParams quantizationParams { get; internal set; }

    public override string ToString() {
      return string.Format("name: {0}, type: {1}, dimensions: {2}, quantizationParams: {3}",
        name,
        type,
        "[" + string.Join(",", dimensions.Select(d => d.ToString()).ToArray()) + "]",
        "{" + quantizationParams + "}");
    }
  }

  private TfLiteModel model = IntPtr.Zero;
  private TfLiteInterpreter interpreter = IntPtr.Zero;
  private TfLiteInterpreterOptions options = IntPtr.Zero;
  // Keeps the managed model buffer pinned for the lifetime of `model`.
  // Fixes a leak: the previous version allocated a pinned GCHandle here and
  // never freed it. The handle must stay allocated while the model exists —
  // TfLiteModelCreate does not copy the buffer, so native code keeps reading
  // from the pinned memory.
  private GCHandle modelDataHandle;

  public Interpreter(byte[] modelData): this(modelData, default(Options)) {}

  public Interpreter(byte[] modelData, Options options) {
    modelDataHandle = GCHandle.Alloc(modelData, GCHandleType.Pinned);
    IntPtr modelDataPtr = modelDataHandle.AddrOfPinnedObject();
    model = TfLiteModelCreate(modelDataPtr, modelData.Length);
    if (model == IntPtr.Zero) {
      // Release the pinned handle before failing.
      Dispose();
      throw new Exception("Failed to create TensorFlowLite Model");
    }
    if (!options.Equals(default(Options))) {
      this.options = TfLiteInterpreterOptionsCreate();
      TfLiteInterpreterOptionsSetNumThreads(this.options, options.threads);
    }
    interpreter = TfLiteInterpreterCreate(model, this.options);
    if (interpreter == IntPtr.Zero) {
      // Release all native resources acquired so far before failing.
      Dispose();
      throw new Exception("Failed to create TensorFlowLite Interpreter");
    }
  }

  /// <summary>
  /// Releases all native resources and the pinned model buffer. Safe to call
  /// more than once.
  /// </summary>
  public void Dispose() {
    if (interpreter != IntPtr.Zero) TfLiteInterpreterDelete(interpreter);
    interpreter = IntPtr.Zero;
    if (model != IntPtr.Zero) TfLiteModelDelete(model);
    model = IntPtr.Zero;
    if (options != IntPtr.Zero) TfLiteInterpreterOptionsDelete(options);
    options = IntPtr.Zero;
    // Unpin the model buffer only after the native model has been deleted.
    if (modelDataHandle.IsAllocated) modelDataHandle.Free();
  }

  /// <summary>
  /// Runs inference on the currently-set input tensor data.
  /// </summary>
  public void Invoke() {
    ThrowIfError(TfLiteInterpreterInvoke(interpreter));
  }

  public int GetInputTensorCount() {
    return TfLiteInterpreterGetInputTensorCount(interpreter);
  }

  /// <summary>
  /// Copies the given managed array into the input tensor at the given index.
  /// </summary>
  public void SetInputTensorData(int inputTensorIndex, Array inputTensorData) {
    GCHandle tensorDataHandle = GCHandle.Alloc(inputTensorData, GCHandleType.Pinned);
    // try/finally fixes a leak: the pinned handle was never freed before.
    // The native copy completes before the call returns, so it is safe to
    // unpin immediately afterwards.
    try {
      IntPtr tensorDataPtr = tensorDataHandle.AddrOfPinnedObject();
      TfLiteTensor tensor = TfLiteInterpreterGetInputTensor(interpreter, inputTensorIndex);
      ThrowIfError(TfLiteTensorCopyFromBuffer(
          tensor, tensorDataPtr, Buffer.ByteLength(inputTensorData)));
    } finally {
      tensorDataHandle.Free();
    }
  }

  public void ResizeInputTensor(int inputTensorIndex, int[] inputTensorShape) {
    ThrowIfError(TfLiteInterpreterResizeInputTensor(
        interpreter, inputTensorIndex, inputTensorShape, inputTensorShape.Length));
  }

  public void AllocateTensors() {
    ThrowIfError(TfLiteInterpreterAllocateTensors(interpreter));
  }

  public int GetOutputTensorCount() {
    return TfLiteInterpreterGetOutputTensorCount(interpreter);
  }

  /// <summary>
  /// Copies the output tensor at the given index into the given managed array.
  /// </summary>
  public void GetOutputTensorData(int outputTensorIndex, Array outputTensorData) {
    GCHandle tensorDataHandle = GCHandle.Alloc(outputTensorData, GCHandleType.Pinned);
    // try/finally fixes a leak: the pinned handle was never freed before.
    try {
      IntPtr tensorDataPtr = tensorDataHandle.AddrOfPinnedObject();
      TfLiteTensor tensor = TfLiteInterpreterGetOutputTensor(interpreter, outputTensorIndex);
      ThrowIfError(TfLiteTensorCopyToBuffer(
          tensor, tensorDataPtr, Buffer.ByteLength(outputTensorData)));
    } finally {
      tensorDataHandle.Free();
    }
  }

  public TensorInfo GetInputTensorInfo(int index) {
    TfLiteTensor tensor = TfLiteInterpreterGetInputTensor(interpreter, index);
    return GetTensorInfo(tensor);
  }

  public TensorInfo GetOutputTensorInfo(int index) {
    TfLiteTensor tensor = TfLiteInterpreterGetOutputTensor(interpreter, index);
    return GetTensorInfo(tensor);
  }

  /// <summary>
  /// Returns a string describing version information of the TensorFlow Lite library.
  /// TensorFlow Lite uses semantic versioning.
  /// </summary>
  /// <returns>A string describing version information</returns>
  public static string GetVersion() {
    return Marshal.PtrToStringAnsi(TfLiteVersion());
  }

  private static string GetTensorName(TfLiteTensor tensor) {
    return Marshal.PtrToStringAnsi(TfLiteTensorName(tensor));
  }

  private static TensorInfo GetTensorInfo(TfLiteTensor tensor) {
    int[] dimensions = new int[TfLiteTensorNumDims(tensor)];
    for (int i = 0; i < dimensions.Length; i++) {
      dimensions[i] = TfLiteTensorDim(tensor, i);
    }
    return new TensorInfo() {
      name = GetTensorName(tensor),
      type = TfLiteTensorType(tensor),
      dimensions = dimensions,
      quantizationParams = TfLiteTensorQuantizationParams(tensor),
    };
  }

  private static void ThrowIfError(int resultCode) {
    if (resultCode != 0) throw new Exception("TensorFlowLite operation failed.");
  }

  #region Externs

#if UNITY_IPHONE && !UNITY_EDITOR
  private const string TensorFlowLibrary = "__Internal";
#else
  private const string TensorFlowLibrary = "tensorflowlite_c";
#endif

  public enum DataType {
    NoType = 0,
    Float32 = 1,
    Int32 = 2,
    UInt8 = 3,
    Int64 = 4,
    String = 5,
    Bool = 6,
    Int16 = 7,
    Complex64 = 8,
    Int8 = 9,
    Float16 = 10,
  }

  public struct QuantizationParams {
    public float scale;
    public int zeroPoint;

    public override string ToString() {
      return string.Format("scale: {0} zeroPoint: {1}", scale, zeroPoint);
    }
  }

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe IntPtr TfLiteVersion();

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteInterpreter TfLiteModelCreate(IntPtr model_data, int model_size);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteInterpreter TfLiteModelDelete(TfLiteModel model);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteInterpreterOptions TfLiteInterpreterOptionsCreate();

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe void TfLiteInterpreterOptionsDelete(TfLiteInterpreterOptions options);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe void TfLiteInterpreterOptionsSetNumThreads(
      TfLiteInterpreterOptions options,
      int num_threads
  );

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteInterpreter TfLiteInterpreterCreate(
      TfLiteModel model,
      TfLiteInterpreterOptions optional_options);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe void TfLiteInterpreterDelete(TfLiteInterpreter interpreter);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteInterpreterGetInputTensorCount(
      TfLiteInterpreter interpreter);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteTensor TfLiteInterpreterGetInputTensor(
      TfLiteInterpreter interpreter,
      int input_index);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteInterpreterResizeInputTensor(
      TfLiteInterpreter interpreter,
      int input_index,
      int[] input_dims,
      int input_dims_size);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteInterpreterAllocateTensors(
      TfLiteInterpreter interpreter);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteInterpreterInvoke(TfLiteInterpreter interpreter);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteInterpreterGetOutputTensorCount(
      TfLiteInterpreter interpreter);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe TfLiteTensor TfLiteInterpreterGetOutputTensor(
      TfLiteInterpreter interpreter,
      int output_index);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe DataType TfLiteTensorType(TfLiteTensor tensor);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteTensorNumDims(TfLiteTensor tensor);

  [DllImport (TensorFlowLibrary)]
  private static extern int TfLiteTensorDim(TfLiteTensor tensor, int dim_index);

  [DllImport (TensorFlowLibrary)]
  private static extern uint TfLiteTensorByteSize(TfLiteTensor tensor);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe IntPtr TfLiteTensorName(TfLiteTensor tensor);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe QuantizationParams TfLiteTensorQuantizationParams(TfLiteTensor tensor);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteTensorCopyFromBuffer(
      TfLiteTensor tensor,
      IntPtr input_data,
      int input_data_size);

  [DllImport (TensorFlowLibrary)]
  private static extern unsafe int TfLiteTensorCopyToBuffer(
      TfLiteTensor tensor,
      IntPtr output_data,
      int output_data_size);

  #endregion
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/examples/unity/TensorFlowLitePlugin/Assets/TensorFlowLite/SDK/Scripts/Interpreter.cs | C# | apache-2.0 | 10,366 |
load("//tensorflow/lite:build_def.bzl", "tflite_copts")

package(
    default_visibility = [
        "//visibility:public",
    ],
    licenses = ["notice"],
)

# ctc support classes imported directly from TensorFlow.
cc_library(
    name = "ctc_utils",
    hdrs = [
        "ctc_beam_entry.h",
        "ctc_beam_scorer.h",
        "ctc_beam_search.h",
        "ctc_decoder.h",
        "ctc_loss_util.h",
    ],
    deps = [
        ":top_n",
        "//tensorflow/lite/kernels/internal:compatibility",
        "//third_party/eigen3",
    ],
)

# top_n support classes imported directly from TensorFlow.
cc_library(
    name = "top_n",
    hdrs = [
        "top_n.h",
    ],
    deps = [
        "//tensorflow/lite/kernels/internal:compatibility",
    ],
)

# Custom TFLite op implementing CTC beam search decoding.
cc_library(
    name = "ctc_beam_search_decoder_op",
    srcs = [
        "ctc_beam_search_decoder.cc",
    ],
    # Suppress warnings that are introduced by Eigen Tensor.
    copts = tflite_copts() + [
        "-Wno-error=reorder",
    ] + select({
        "//tensorflow:ios": ["-Wno-error=invalid-partial-specialization"],
        "//conditions:default": [
        ],
    }),
    deps = [
        ":ctc_utils",
        "//tensorflow/lite:framework",
        "//tensorflow/lite/c:common",
        "//tensorflow/lite/kernels:kernel_util",
        "//tensorflow/lite/kernels:op_macros",
        "//tensorflow/lite/kernels/internal:optimized_base",
        "//tensorflow/lite/kernels/internal:tensor",
        "@flatbuffers",
    ],
)

# Unit test for the CTC beam search decoder op.
cc_test(
    name = "ctc_beam_search_decoder_test",
    size = "small",
    srcs = ["ctc_beam_search_decoder_test.cc"],
    tags = ["tflite_not_portable_ios"],
    deps = [
        ":ctc_beam_search_decoder_op",
        "//tensorflow/lite:framework",
        "//tensorflow/lite/kernels:builtin_ops",
        "//tensorflow/lite/kernels:test_util",
        "@com_google_googletest//:gtest_main",
        "@flatbuffers",
    ],
)
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Copied from tensorflow/core/util/ctc/ctc_beam_entry.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_ENTRY_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_ENTRY_H_
#include <algorithm>
#include <memory>
#include <unordered_map>
#include <vector>
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/lite/experimental/kernels/ctc_loss_util.h"
namespace tflite {
namespace experimental {
namespace ctc {
// The ctc_beam_search namespace holds several classes meant to be accessed only
// in case of extending the CTCBeamSearch decoder to allow custom scoring
// functions.
//
// BeamEntry is exposed through template arguments BeamScorer and BeamComparer
// of CTCBeamSearch (ctc_beam_search.h).
namespace ctc_beam_search {
// Placeholder state for beams that carry no custom scorer state.
struct EmptyBeamState {};

// Per-beam probabilities, stored in log space (kLogZero represents
// probability zero): the total, plus its split between paths ending in a
// blank and paths ending in a non-blank label.
struct BeamProbability {
  BeamProbability() : total(kLogZero), blank(kLogZero), label(kLogZero) {}
  // Resets all probabilities back to log-zero ("impossible").
  void Reset() {
    total = kLogZero;
    blank = kLogZero;
    label = kLogZero;
  }
  float total;
  float blank;
  float label;
};
template <class CTCBeamState>
class BeamRoot;
// One node in the beam-search prefix tree. Each entry represents a label
// prefix: `label` is the last label and `parent` points at the prefix one
// label shorter. Entries are created and owned exclusively by a BeamRoot.
template <class CTCBeamState = EmptyBeamState>
struct BeamEntry {
  // BeamRoot<CTCBeamState>::AddEntry() serves as the factory method.
  friend BeamEntry<CTCBeamState>* BeamRoot<CTCBeamState>::AddEntry(
      BeamEntry<CTCBeamState>* p, int l);
  // A beam is "active" once any probability mass has been assigned to newp;
  // fresh entries start at kLogZero.
  inline bool Active() const { return newp.total != kLogZero; }
  // Return the child at the given index, or construct a new one in-place if
  // none was found.
  BeamEntry& GetChild(int ind) {
    // emplace() returns {iterator, inserted?}; the nullptr placeholder is
    // replaced only when the child did not already exist.
    auto entry = children.emplace(ind, nullptr);
    auto& child_entry = entry.first->second;
    // If this is a new child, populate the BeamEntry<CTCBeamState>*.
    if (entry.second) {
      child_entry = beam_root->AddEntry(this, ind);
    }
    return *child_entry;
  }
  // Reconstructs the label sequence for this beam by walking parent links
  // back to the root. When merge_repeated is true, consecutive identical
  // labels are collapsed into one.
  std::vector<int> LabelSeq(bool merge_repeated) const {
    std::vector<int> labels;
    int prev_label = -1;
    const BeamEntry* c = this;
    while (c->parent != nullptr) {  // Checking c->parent to skip root leaf.
      if (!merge_repeated || c->label != prev_label) {
        labels.push_back(c->label);
      }
      prev_label = c->label;
      c = c->parent;
    }
    std::reverse(labels.begin(), labels.end());
    return labels;
  }

  BeamEntry<CTCBeamState>* parent;
  int label;
  // All instances of child BeamEntry are owned by *beam_root.
  std::unordered_map<int, BeamEntry<CTCBeamState>*> children;
  BeamProbability oldp;  // Previously-committed probabilities.
  BeamProbability newp;  // Probabilities being accumulated (see Active()).
  CTCBeamState state;

 private:
  // Constructor giving parent, label, and the beam_root.
  // The object pointed to by p cannot be copied and should not be moved,
  // otherwise parent will become invalid.
  // This private constructor is only called through the factory method
  // BeamRoot<CTCBeamState>::AddEntry().
  BeamEntry(BeamEntry* p, int l, BeamRoot<CTCBeamState>* beam_root)
      : parent(p), label(l), beam_root(beam_root) {}

  BeamRoot<CTCBeamState>* beam_root;

  BeamEntry(const BeamEntry&) = delete;
  void operator=(const BeamEntry&) = delete;
};
// This class owns all instances of BeamEntry. This is used to avoid recursive
// destructor call during destruction.
template <class CTCBeamState = EmptyBeamState>
class BeamRoot {
 public:
  BeamRoot(BeamEntry<CTCBeamState>* p, int l) { root_entry_ = AddEntry(p, l); }
  BeamRoot(const BeamRoot&) = delete;
  BeamRoot& operator=(const BeamRoot&) = delete;

  // Allocates a new BeamEntry; ownership remains with beam_entries_, so the
  // returned raw pointer stays valid for the lifetime of this BeamRoot.
  BeamEntry<CTCBeamState>* AddEntry(BeamEntry<CTCBeamState>* p, int l) {
    auto* new_entry = new BeamEntry<CTCBeamState>(p, l, this);
    beam_entries_.emplace_back(new_entry);
    return new_entry;
  }
  BeamEntry<CTCBeamState>* RootEntry() const { return root_entry_; }

 private:
  BeamEntry<CTCBeamState>* root_entry_ = nullptr;
  // Flat ownership of every entry: destroying this vector frees all entries
  // without recursing through the parent/child links.
  std::vector<std::unique_ptr<BeamEntry<CTCBeamState>>> beam_entries_;
};
// BeamComparer is the default beam comparer provided in CTCBeamSearch.
// It orders beams by descending newp.total, i.e. the most probable beam
// ranks first.
template <class CTCBeamState = EmptyBeamState>
class BeamComparer {
 public:
  virtual ~BeamComparer() {}
  // Returns true when beam `a` should rank ahead of beam `b`.
  virtual bool inline operator()(const BeamEntry<CTCBeamState>* a,
                                 const BeamEntry<CTCBeamState>* b) const {
    return a->newp.total > b->newp.total;
  }
};
} // namespace ctc_beam_search
} // namespace ctc
} // namespace experimental
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_ENTRY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_beam_entry.h | C++ | apache-2.0 | 5,039 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Collection of scoring classes that can be extended and provided to the
// CTCBeamSearchDecoder to incorporate additional scoring logic (such as a
// language model).
//
// To build a custom scorer extend and implement the pure virtual methods from
// BeamScorerInterface. The default CTC decoding behavior is implemented
// through BaseBeamScorer.
// Copied from tensorflow/core/util/ctc/ctc_beam_scorer.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SCORER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SCORER_H_
#include "tensorflow/lite/experimental/kernels/ctc_beam_entry.h"
namespace tflite {
namespace experimental {
namespace ctc {
// Base implementation of a beam scorer used by default by the decoder that can
// be subclassed and provided as an argument to CTCBeamSearchDecoder, if complex
// scoring is required. Its main purpose is to provide a thin layer for
// integrating language model scoring easily.
//
// Every method here is a no-op (or identity), so the default scorer leaves
// beam probabilities unchanged.
template <typename CTCBeamState>
class BaseBeamScorer {
 public:
  virtual ~BaseBeamScorer() {}
  // State initialization.
  virtual void InitializeState(CTCBeamState* root) const {}
  // ExpandState is called when expanding a beam to one of its children.
  // Called at most once per child beam. In the simplest case, no state
  // expansion is done.
  virtual void ExpandState(const CTCBeamState& from_state, int from_label,
                           CTCBeamState* to_state, int to_label) const {}
  // ExpandStateEnd is called after decoding has finished. Its purpose is to
  // allow a final scoring of the beam in its current state, before resorting
  // and retrieving the TopN requested candidates. Called at most once per beam.
  virtual void ExpandStateEnd(CTCBeamState* state) const {}
  // GetStateExpansionScore should be an inexpensive method to retrieve the
  // (cached) expansion score computed within ExpandState. The score is
  // multiplied (log-addition) with the input score at the current step from
  // the network.
  //
  // The score returned should be a log-probability. In the simplest case, as
  // there's no state expansion logic, the expansion score is zero.
  virtual float GetStateExpansionScore(const CTCBeamState& state,
                                       float previous_score) const {
    return previous_score;
  }
  // GetStateEndExpansionScore should be an inexpensive method to retrieve the
  // (cached) expansion score computed within ExpandStateEnd. The score is
  // multiplied (log-addition) with the final probability of the beam.
  //
  // The score returned should be a log-probability.
  virtual float GetStateEndExpansionScore(const CTCBeamState& state) const {
    return 0;
  }
};
} // namespace ctc
} // namespace experimental
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SCORER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_beam_scorer.h | C++ | apache-2.0 | 3,534 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Copied from tensorflow/core/util/ctc/ctc_beam_search.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SEARCH_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SEARCH_H_
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <vector>
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/lite/experimental/kernels/ctc_beam_entry.h"
#include "tensorflow/lite/experimental/kernels/ctc_beam_scorer.h"
#include "tensorflow/lite/experimental/kernels/ctc_decoder.h"
#include "tensorflow/lite/experimental/kernels/ctc_loss_util.h"
#include "tensorflow/lite/experimental/kernels/top_n.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace experimental {
namespace ctc {
template <typename CTCBeamState = ctc_beam_search::EmptyBeamState,
          typename CTCBeamComparer =
              ctc_beam_search::BeamComparer<CTCBeamState>>
class CTCBeamSearchDecoder : public CTCDecoder {
  // Beam Search
  //
  // Example (GravesTh Fig. 7.5):
  //         a    -
  //  P = [ 0.3  0.7 ]  t = 0
  //      [ 0.4  0.6 ]  t = 1
  //
  // Then P(l = -) = P(--) = 0.7 * 0.6 = 0.42
  //      P(l = a) = P(a-) + P(aa) + P(-a) = 0.3*0.4 + ... = 0.58
  //
  // In this case, Best Path decoding is suboptimal.
  //
  // For Beam Search, we use the following main recurrence relations:
  //
  // Relation 1:
  // ---------------------------------------------------------- Eq. 1
  //      P(l=abcd @ t=7) = P(l=abc  @ t=6) * P(d @ 7)
  //                      + P(l=abcd @ t=6) * (P(d @ 7) + P(- @ 7))
  // where P(l=? @ t=7), ? = a, ab, abc, abcd are all stored and
  // updated recursively in the beam entry.
  //
  // Relation 2:
  // ---------------------------------------------------------- Eq. 2
  //      P(l=abc? @ t=3) = P(l=abc @ t=2) * P(? @ 3)
  // for ? in a, b, d, ..., (not including c or the blank index),
  // and the recurrence starts from the beam entry for P(l=abc @ t=2).
  //
  // For this case, the length of the new sequence equals t+1 (t
  // starts at 0). This special case can be calculated as:
  //   P(l=abc? @ t=3) = P(a @ 0)*P(b @ 1)*P(c @ 2)*P(? @ 3)
  // but we calculate it recursively for speed purposes.

  // Internal aliases for the beam-tree node, root and probability types.
  typedef ctc_beam_search::BeamEntry<CTCBeamState> BeamEntry;
  typedef ctc_beam_search::BeamRoot<CTCBeamState> BeamRoot;
  typedef ctc_beam_search::BeamProbability BeamProbability;

 public:
  typedef BaseBeamScorer<CTCBeamState> DefaultBeamScorer;

  // The beam search decoder is constructed specifying the beam_width (number
  // of candidates to keep at each decoding timestep) and a beam scorer (used
  // for custom scoring, for example enabling the use of a language model).
  // The ownership of the scorer remains with the caller. The default
  // implementation, CTCBeamSearchDecoder<>::DefaultBeamScorer, generates the
  // standard beam search.
  CTCBeamSearchDecoder(int num_classes, int beam_width,
                       BaseBeamScorer<CTCBeamState>* scorer, int batch_size = 1,
                       bool merge_repeated = false)
      : CTCDecoder(num_classes, batch_size, merge_repeated),
        beam_width_(beam_width),
        leaves_(beam_width),
        beam_scorer_(scorer) {
    Reset();
  }

  ~CTCBeamSearchDecoder() override {}

  // Run the hibernating beam search algorithm on the given input.
  // Returns false if any output container is too small for the batch,
  // or if the requested number of paths cannot be produced.
  bool Decode(const CTCDecoder::SequenceLength& seq_len,
              const std::vector<CTCDecoder::Input>& input,
              std::vector<CTCDecoder::Output>* output,
              CTCDecoder::ScoreOutput* scores) override;

  // Calculate the next step of the beam search and update the internal state,
  // given one time step of (unnormalized) log-probabilities.
  template <typename Vector>
  void Step(const Vector& log_input_t);

  // Fills top_k_logits / top_k_indices with the K best non-blank classes in
  // descending order and returns the overall maximum logit (including blank).
  template <typename Vector>
  float GetTopK(const int K, const Vector& input,
                std::vector<float>* top_k_logits,
                std::vector<int>* top_k_indices);

  // Retrieve the beam scorer instance used during decoding.
  BaseBeamScorer<CTCBeamState>* GetBeamScorer() const { return beam_scorer_; }

  // Set label selection parameters for faster decoding.
  // See comments for label_selection_size_ and label_selection_margin_.
  void SetLabelSelectionParameters(int label_selection_size,
                                   float label_selection_margin) {
    label_selection_size_ = label_selection_size;
    label_selection_margin_ = label_selection_margin;
  }

  // Reset the beam search
  void Reset();

  // Extract the top n paths at current time step
  bool TopPaths(int n, std::vector<std::vector<int>>* paths,
                std::vector<float>* log_probs, bool merge_repeated) const;

 private:
  int beam_width_;

  // Label selection is designed to avoid possibly very expensive scorer calls,
  // by pruning the hypotheses based on the input alone.
  // Label selection size controls how many items in each beam are passed
  // through to the beam scorer. Only items with top N input scores are
  // considered.
  // Label selection margin controls the difference between minimal input score
  // (versus the best scoring label) for an item to be passed to the beam
  // scorer. This margin is expressed in terms of log-probability.
  // Default is to do no label selection.
  // For more detail: https://research.google.com/pubs/pub44823.html
  int label_selection_size_ = 0;       // zero means unlimited
  float label_selection_margin_ = -1;  // -1 means unlimited.

  // Current frontier of the search, capped at beam_width_ entries.
  gtl::TopN<BeamEntry*, CTCBeamComparer> leaves_;
  // Owns every BeamEntry created since the last Reset().
  std::unique_ptr<BeamRoot> beam_root_;
  // Not owned; lifetime managed by the caller (see constructor comment).
  BaseBeamScorer<CTCBeamState>* beam_scorer_;

  CTCBeamSearchDecoder(const CTCBeamSearchDecoder&) = delete;
  void operator=(const CTCBeamSearchDecoder&) = delete;
};
template <typename CTCBeamState, typename CTCBeamComparer>
bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Decode(
    const CTCDecoder::SequenceLength& seq_len,
    const std::vector<CTCDecoder::Input>& input,
    std::vector<CTCDecoder::Output>* output, ScoreOutput* scores) {
  // Storage for top paths.
  std::vector<std::vector<int>> beams;
  std::vector<float> beam_log_probabilities;
  // The number of requested paths is implied by the output container size.
  int top_n = output->size();
  // Validate that every output slot can hold one sequence per batch element.
  if (std::any_of(output->begin(), output->end(),
                  [this](const CTCDecoder::Output& output) -> bool {
                    return output.size() < this->batch_size_;
                  })) {
    return false;
  }
  if (scores->rows() < batch_size_ || scores->cols() < top_n) {
    return false;
  }

  // Each batch element is decoded independently with a freshly reset beam.
  for (int b = 0; b < batch_size_; ++b) {
    int seq_len_b = seq_len[b];
    Reset();

    for (int t = 0; t < seq_len_b; ++t) {
      // Pass log-probabilities for this example + time.
      Step(input[t].row(b));
    }  // for (int t...

    // O(n * log(n))
    std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract());
    leaves_.Reset();
    // Give the scorer a chance to apply its end-of-sequence score to every
    // surviving beam before the final ranking.
    for (int i = 0; i < branches->size(); ++i) {
      BeamEntry* entry = (*branches)[i];
      beam_scorer_->ExpandStateEnd(&entry->state);
      entry->newp.total +=
          beam_scorer_->GetStateEndExpansionScore(entry->state);
      leaves_.push(entry);
    }

    bool status =
        TopPaths(top_n, &beams, &beam_log_probabilities, merge_repeated_);
    if (!status) {
      return status;
    }

    TFLITE_DCHECK_EQ(top_n, beam_log_probabilities.size());
    TFLITE_DCHECK_EQ(beams.size(), beam_log_probabilities.size());

    for (int i = 0; i < top_n; ++i) {
      // Copy output to the correct beam + batch
      (*output)[i][b].swap(beams[i]);
      // Scores are reported as negated log-probabilities.
      (*scores)(b, i) = -beam_log_probabilities[i];
    }
  }  // for (int b...
  return true;
}
template <typename CTCBeamState, typename CTCBeamComparer>
template <typename Vector>
float CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::GetTopK(
    const int K, const Vector& input, std::vector<float>* top_k_logits,
    std::vector<int>* top_k_indices) {
  // Find Top K choices, complexity nk in worst case. The array input is read
  // just once.
  TFLITE_DCHECK_EQ(num_classes_, input.size());
  top_k_logits->clear();
  top_k_indices->clear();
  top_k_logits->resize(K, -INFINITY);
  top_k_indices->resize(K, -1);
  // Insertion sort into the two K-sized arrays, kept in descending logit
  // order. The blank class (index num_classes_ - 1) is deliberately excluded
  // from the loop.
  for (int j = 0; j < num_classes_ - 1; ++j) {
    const float logit = input(j);
    if (logit > (*top_k_logits)[K - 1]) {
      // Shift smaller entries down until the insertion slot is found.
      int k = K - 1;
      while (k > 0 && logit > (*top_k_logits)[k - 1]) {
        (*top_k_logits)[k] = (*top_k_logits)[k - 1];
        (*top_k_indices)[k] = (*top_k_indices)[k - 1];
        k--;
      }
      (*top_k_logits)[k] = logit;
      (*top_k_indices)[k] = j;
    }
  }
  // Return max value which is in 0th index or blank character logit
  return std::max((*top_k_logits)[0], input(num_classes_ - 1));
}
template <typename CTCBeamState, typename CTCBeamComparer>
template <typename Vector>
void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Step(
    const Vector& raw_input) {
  std::vector<float> top_k_logits;
  std::vector<int> top_k_indices;
  // Label selection only kicks in when it would actually restrict the set of
  // classes considered at this step.
  const bool top_k =
      (label_selection_size_ > 0 && label_selection_size_ < raw_input.size());
  // Number of character classes to consider in each step.
  const int max_classes = top_k ? label_selection_size_ : (num_classes_ - 1);
  // Get max coefficient and remove it from raw_input later.
  float max_coeff;
  if (top_k) {
    max_coeff = GetTopK(label_selection_size_, raw_input, &top_k_logits,
                        &top_k_indices);
  } else {
    max_coeff = raw_input.maxCoeff();
  }

  // Get normalization term of softmax: log(sum(exp(logit[j]-max_coeff))).
  // Subtracting max_coeff first keeps the exponentials numerically stable.
  float logsumexp = 0.0;
  for (int j = 0; j < raw_input.size(); ++j) {
    logsumexp += Eigen::numext::exp(raw_input(j) - max_coeff);
  }
  logsumexp = Eigen::numext::log(logsumexp);
  // Final normalization offset to get correct log probabilities.
  float norm_offset = max_coeff + logsumexp;

  const float label_selection_input_min =
      (label_selection_margin_ >= 0) ? (max_coeff - label_selection_margin_)
                                     : -std::numeric_limits<float>::infinity();

  // Extract the beams sorted in decreasing new probability
  TFLITE_DCHECK_EQ(num_classes_, raw_input.size());

  std::unique_ptr<std::vector<BeamEntry*>> branches(leaves_.Extract());
  leaves_.Reset();

  for (BeamEntry* b : *branches) {
    // P(.. @ t) becomes the new P(.. @ t-1)
    b->oldp = b->newp;
  }

  // First pass: update the probabilities of the existing beams in place.
  for (BeamEntry* b : *branches) {
    if (b->parent != nullptr) {  // if not the root
      if (b->parent->Active()) {
        // If last two sequence characters are identical:
        //   Plabel(l=acc @ t=6) = (Plabel(l=acc @ t=5)
        //                          + Pblank(l=ac @ t=5))
        // else:
        //   Plabel(l=abc @ t=6) = (Plabel(l=abc @ t=5)
        //                          + P(l=ab @ t=5))
        float previous = (b->label == b->parent->label) ? b->parent->oldp.blank
                                                        : b->parent->oldp.total;
        b->newp.label =
            LogSumExp(b->newp.label,
                      beam_scorer_->GetStateExpansionScore(b->state, previous));
      }
      // Plabel(l=abc @ t=6) *= P(c @ 6)
      b->newp.label += raw_input(b->label) - norm_offset;
    }
    // Pblank(l=abc @ t=6) = P(l=abc @ t=5) * P(- @ 6)
    b->newp.blank = b->oldp.total + raw_input(blank_index_) - norm_offset;
    // P(l=abc @ t=6) = Plabel(l=abc @ t=6) + Pblank(l=abc @ t=6)
    b->newp.total = LogSumExp(b->newp.blank, b->newp.label);

    // Push the entry back to the top paths list.
    // Note, this will always fill leaves back up in sorted order.
    leaves_.push(b);
  }

  // we need to resort branches in descending oldp order.

  // branches is in descending oldp order because it was
  // originally in descending newp order and we copied newp to oldp.

  // Grow new leaves
  for (BeamEntry* b : *branches) {
    // A new leaf (represented by its BeamProbability) is a candidate
    // iff its total probability is nonzero and either the beam list
    // isn't full, or the lowest probability entry in the beam has a
    // lower probability than the leaf.
    auto is_candidate = [this](const BeamProbability& prob) {
      return (prob.total > kLogZero &&
              (leaves_.size() < beam_width_ ||
               prob.total > leaves_.peek_bottom()->newp.total));
    };

    if (!is_candidate(b->oldp)) {
      continue;
    }

    for (int ind = 0; ind < max_classes; ind++) {
      const int label = top_k ? top_k_indices[ind] : ind;
      const float logit = top_k ? top_k_logits[ind] : raw_input(ind);
      // Perform label selection: if input for this label looks very
      // unpromising, never evaluate it with a scorer.
      // We may compare logits instead of log probabilities,
      // since the difference is the same in both cases.
      if (logit < label_selection_input_min) {
        continue;
      }
      BeamEntry& c = b->GetChild(label);
      if (!c.Active()) {
        //   Pblank(l=abcd @ t=6) = 0
        c.newp.blank = kLogZero;
        // If new child label is identical to beam label:
        //   Plabel(l=abcc @ t=6) = Pblank(l=abc @ t=5) * P(c @ 6)
        // Otherwise:
        //   Plabel(l=abcd @ t=6) = P(l=abc @ t=5) * P(d @ 6)
        beam_scorer_->ExpandState(b->state, b->label, &c.state, c.label);
        float previous = (c.label == b->label) ? b->oldp.blank : b->oldp.total;
        c.newp.label = logit - norm_offset +
                       beam_scorer_->GetStateExpansionScore(c.state, previous);
        // P(l=abcd @ t=6) = Plabel(l=abcd @ t=6)
        c.newp.total = c.newp.label;

        if (is_candidate(c.newp)) {
          // Before adding the new node to the beam, check if the beam
          // is already at maximum width.
          if (leaves_.size() == beam_width_) {
            // Bottom is no longer in the beam search. Reset
            // its probability; signal it's no longer in the beam search.
            BeamEntry* bottom = leaves_.peek_bottom();
            bottom->newp.Reset();
          }
          leaves_.push(&c);
        } else {
          // Deactivate child.
          c.oldp.Reset();
          c.newp.Reset();
        }
      }
    }
  }  // for (BeamEntry* b...
}
template <typename CTCBeamState, typename CTCBeamComparer>
void CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::Reset() {
  leaves_.Reset();

  // This beam root, and all of its children, will be in memory until
  // the next reset. Replacing the root frees the previous beam tree.
  beam_root_.reset(new BeamRoot(nullptr, -1));
  beam_root_->RootEntry()->newp.total = 0.0;  // ln(1)
  beam_root_->RootEntry()->newp.blank = 0.0;  // ln(1)

  // Add the root as the initial leaf.
  leaves_.push(beam_root_->RootEntry());

  // Call initialize state on the root object.
  beam_scorer_->InitializeState(&beam_root_->RootEntry()->state);
}
template <typename CTCBeamState, typename CTCBeamComparer>
bool CTCBeamSearchDecoder<CTCBeamState, CTCBeamComparer>::TopPaths(
    int n, std::vector<std::vector<int>>* paths, std::vector<float>* log_probs,
    bool merge_repeated) const {
  TFLITE_DCHECK(paths);
  TFLITE_DCHECK(log_probs);
  paths->clear();
  log_probs->clear();
  // Cannot produce more paths than the beam width, nor more than there are
  // live leaves.
  if (n > beam_width_) {
    return false;
  }
  if (n > leaves_.size()) {
    return false;
  }

  // Keep only the n best leaves. Pushing every leaf costs
  // O(beam_width_ * log(n)); space is O(n).
  gtl::TopN<BeamEntry*, CTCBeamComparer> best_leaves(n);
  for (auto it = leaves_.unsorted_begin(); it != leaves_.unsorted_end(); ++it) {
    best_leaves.push(*it);
  }

  // Extract() returns the retained entries in sorted order: O(n * log(n)).
  std::unique_ptr<std::vector<BeamEntry*>> ranked(best_leaves.Extract());
  for (int rank = 0; rank < n; ++rank) {
    BeamEntry* entry = (*ranked)[rank];
    paths->push_back(entry->LabelSeq(merge_repeated));
    log_probs->push_back(entry->newp.total);
  }
  return true;
}
} // namespace ctc
} // namespace experimental
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_BEAM_SEARCH_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_beam_search.h | C++ | apache-2.0 | 16,443 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <vector>
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/kernels/ctc_beam_search.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace ops {
namespace experimental {
namespace ctc_beam_search_decoder {
constexpr int kInputsTensor = 0;
constexpr int kSequenceLengthTensor = 1;
typedef struct {
int beam_width;
int top_paths;
bool merge_repeated;
} CTCBeamSearchDecoderParams;
// Parses the custom-op options (a flexbuffer map) into a heap-allocated
// CTCBeamSearchDecoderParams. The returned pointer is released in Free().
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  TFLITE_CHECK(buffer != nullptr);
  const flexbuffers::Map& options =
      flexbuffers::GetRoot(reinterpret_cast<const uint8_t*>(buffer), length)
          .AsMap();
  auto* params = new CTCBeamSearchDecoderParams;
  params->beam_width = options["beam_width"].AsInt32();
  params->top_paths = options["top_paths"].AsInt32();
  params->merge_repeated = options["merge_repeated"].AsBool();
  return params;
}
// Releases the CTCBeamSearchDecoderParams allocated by Init() with `new`.
void Free(TfLiteContext* context, void* buffer) {
  delete static_cast<CTCBeamSearchDecoderParams*>(buffer);
}
// Validates input/output counts and types, marks the per-path sparse outputs
// dynamic (their sizes are only known at Eval time), and statically shapes
// the trailing log-probabilities output to [batch_size, top_paths].
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const CTCBeamSearchDecoderParams* option =
      reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);
  const int top_paths = option->top_paths;
  // The beam must be at least as wide as the number of requested paths.
  TF_LITE_ENSURE(context, option->beam_width >= top_paths);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  // The outputs should be top_paths * 3 + 1.
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 3 * top_paths + 1);

  const TfLiteTensor* inputs;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputsTensor, &inputs));
  TF_LITE_ENSURE_EQ(context, NumDimensions(inputs), 3);
  // TensorFlow only supports float.
  TF_LITE_ENSURE_EQ(context, inputs->type, kTfLiteFloat32);
  // Input layout is [max_time, batch_size, num_classes] (see Eval).
  const int batch_size = SizeOfDimension(inputs, 1);

  const TfLiteTensor* sequence_length;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
                                          &sequence_length));
  TF_LITE_ENSURE_EQ(context, NumDimensions(sequence_length), 1);
  TF_LITE_ENSURE_EQ(context, NumElements(sequence_length), batch_size);
  // TensorFlow only supports int32.
  TF_LITE_ENSURE_EQ(context, sequence_length->type, kTfLiteInt32);

  // Resize decoded outputs.
  // Do not resize indices & values cause we don't know the values yet.
  for (int i = 0; i < top_paths; ++i) {
    TfLiteTensor* indices;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i, &indices));
    SetTensorToDynamic(indices);
    TfLiteTensor* values;
    TF_LITE_ENSURE_OK(context,
                      GetOutputSafe(context, node, i + top_paths, &values));
    SetTensorToDynamic(values);
    TfLiteTensor* output_shape;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, i + 2 * top_paths,
                                             &output_shape));
    SetTensorToDynamic(output_shape);
  }

  // Resize log probability outputs.
  TfLiteTensor* log_probability_output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, top_paths * 3,
                                           &log_probability_output));
  TfLiteIntArray* log_probability_output_shape_array = TfLiteIntArrayCreate(2);
  log_probability_output_shape_array->data[0] = batch_size;
  log_probability_output_shape_array->data[1] = top_paths;
  // ResizeTensor takes ownership of the shape array.
  return context->ResizeTensor(context, log_probability_output,
                               log_probability_output_shape_array);
}
// Resizes `output` to the dimensions listed in `output_shape`.
// The freshly created TfLiteIntArray is handed to ResizeTensor, which takes
// ownership of it.
TfLiteStatus Resize(TfLiteContext* context,
                    std::initializer_list<int32_t> output_shape,
                    TfLiteTensor* output) {
  TfLiteIntArray* shape_array = TfLiteIntArrayCreate(output_shape.size());
  int dim = 0;
  for (const int32_t extent : output_shape) {
    shape_array->data[dim++] = extent;
  }
  return context->ResizeTensor(context, output, shape_array);
}
// Writes sequences[batch][path] into the op's sparse outputs. For each path p
// there are three tensors: indices (rows of (batch, time) pairs), values (the
// labels, concatenated across the batch), and a dense shape of
// [batch_size, longest decoded sequence for that path].
TfLiteStatus StoreAllDecodedSequences(
    TfLiteContext* context,
    const std::vector<std::vector<std::vector<int>>>& sequences,
    TfLiteNode* node, int top_paths) {
  const int32_t batch_size = sequences.size();
  std::vector<int32_t> num_entries(top_paths, 0);

  // Calculate num_entries per path
  for (const auto& batch_s : sequences) {
    TF_LITE_ENSURE_EQ(context, batch_s.size(), top_paths);
    for (int p = 0; p < top_paths; ++p) {
      num_entries[p] += batch_s[p].size();
    }
  }

  for (int p = 0; p < top_paths; ++p) {
    const int32_t p_num = num_entries[p];

    // Resize the decoded outputs.
    TfLiteTensor* indices;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p, &indices));
    TF_LITE_ENSURE_OK(context, Resize(context, {p_num, 2}, indices));

    TfLiteTensor* values;
    TF_LITE_ENSURE_OK(context,
                      GetOutputSafe(context, node, p + top_paths, &values));
    TF_LITE_ENSURE_OK(context, Resize(context, {p_num}, values));

    TfLiteTensor* decoded_shape;
    TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, p + 2 * top_paths,
                                             &decoded_shape));
    TF_LITE_ENSURE_OK(context, Resize(context, {2}, decoded_shape));

    int32_t max_decoded = 0;
    int32_t offset = 0;

    int32_t* indices_data = GetTensorData<int32_t>(indices);
    int32_t* values_data = GetTensorData<int32_t>(values);
    int32_t* decoded_shape_data = GetTensorData<int32_t>(decoded_shape);
    for (int b = 0; b < batch_size; ++b) {
      auto& p_batch = sequences[b][p];
      int32_t num_decoded = p_batch.size();
      max_decoded = std::max(max_decoded, num_decoded);

      // Copy this batch element's labels, then emit one (batch, time) index
      // row per label; `offset` advances over the flattened values tensor.
      std::copy_n(p_batch.begin(), num_decoded, values_data + offset);
      for (int32_t t = 0; t < num_decoded; ++t, ++offset) {
        indices_data[offset * 2] = b;
        indices_data[offset * 2 + 1] = t;
      }
    }
    decoded_shape_data[0] = batch_size;
    decoded_shape_data[1] = max_decoded;
  }
  return kTfLiteOk;
}
// Runs beam search decoding over the [max_time, batch_size, num_classes]
// logits, one batch element at a time, and stores the decoded sparse outputs
// plus per-path log probabilities.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* inputs;
  TF_LITE_ENSURE_OK(context,
                    GetInputSafe(context, node, kInputsTensor, &inputs));
  const TfLiteTensor* sequence_length;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kSequenceLengthTensor,
                                          &sequence_length));
  const CTCBeamSearchDecoderParams* option =
      reinterpret_cast<CTCBeamSearchDecoderParams*>(node->user_data);

  const int max_time = SizeOfDimension(inputs, 0);
  const int batch_size = SizeOfDimension(inputs, 1);
  const int num_classes = SizeOfDimension(inputs, 2);
  const int beam_width = option->beam_width;
  const int top_paths = option->top_paths;
  const bool merge_repeated = option->merge_repeated;

  // Validate sequence length is less or equal than max time.
  for (int i = 0; i < batch_size; ++i) {
    TF_LITE_ENSURE(context,
                   max_time >= GetTensorData<int32_t>(sequence_length)[i]);
  }

  // The following logic is implemented like
  // tensorflow/core/kernels/ctc_decoder_ops.cc
  // Build one [batch_size x num_classes] matrix view per time step.
  // (Loop index is int, matching max_time's type.)
  std::vector<optimized_ops::TTypes<float>::UnalignedConstMatrix> input_list_t;
  for (int t = 0; t < max_time; ++t) {
    input_list_t.emplace_back(
        GetTensorData<float>(inputs) + t * batch_size * num_classes, batch_size,
        num_classes);
  }

  ::tflite::experimental::ctc::CTCBeamSearchDecoder<>::DefaultBeamScorer
      beam_scorer;
  ::tflite::experimental::ctc::CTCBeamSearchDecoder<> beam_search(
      num_classes, beam_width, &beam_scorer, 1 /* batch_size */,
      merge_repeated);

  // Temporary buffer holding one time step of one batch element.
  // Using std::vector instead of raw malloc/free fixes two defects of the
  // previous implementation: the allocation result was never checked, and the
  // buffer leaked whenever a TF_LITE_ENSURE below returned early.
  std::vector<float> input_chip_t_storage(num_classes);
  Eigen::array<Eigen::DenseIndex, 1> dims;
  dims[0] = num_classes;
  optimized_ops::TTypes<float>::Flat input_chip_t(input_chip_t_storage.data(),
                                                  dims);

  std::vector<std::vector<std::vector<int>>> best_paths(batch_size);
  std::vector<float> log_probs;

  TfLiteTensor* log_probabilities;
  TF_LITE_ENSURE_OK(
      context, GetOutputSafe(context, node, 3 * top_paths, &log_probabilities));
  float* log_probabilities_output = GetTensorData<float>(log_probabilities);

  // Assumption: the blank index is num_classes - 1
  for (int b = 0; b < batch_size; ++b) {
    auto& best_paths_b = best_paths[b];
    best_paths_b.resize(top_paths);
    for (int t = 0; t < GetTensorData<int32_t>(sequence_length)[b]; ++t) {
      input_chip_t = input_list_t[t].chip(b, 0);
      auto input_bi =
          Eigen::Map<const Eigen::ArrayXf>(input_chip_t.data(), num_classes);
      beam_search.Step(input_bi);
    }
    TF_LITE_ENSURE(context, beam_search.TopPaths(top_paths, &best_paths_b,
                                                 &log_probs, merge_repeated));
    beam_search.Reset();

    // Fill in log_probabilities output.
    for (int bp = 0; bp < top_paths; ++bp) {
      log_probabilities_output[b * top_paths + bp] = log_probs[bp];
    }
  }

  return StoreAllDecodedSequences(context, best_paths, node, top_paths);
}
} // namespace ctc_beam_search_decoder
// Returns the registration for the custom CTCBeamSearchDecoder op.
// Only init/free/prepare/invoke are set; the remaining TfLiteRegistration
// fields are zero-initialized by the aggregate initializer.
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER() {
  static TfLiteRegistration r = {
      ctc_beam_search_decoder::Init, ctc_beam_search_decoder::Free,
      ctc_beam_search_decoder::Prepare, ctc_beam_search_decoder::Eval};
  return &r;
}
} // namespace experimental
} // namespace ops
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_beam_search_decoder.cc | C++ | apache-2.0 | 10,305 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <functional>
#include <memory>
#include <vector>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model.h"
namespace tflite {
namespace ops {
namespace experimental {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
TfLiteRegistration* Register_CTC_BEAM_SEARCH_DECODER();
namespace {
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
// Test harness that builds a single CTCBeamSearchDecoder custom op with the
// decoder options serialized as a flexbuffer map.
class CTCBeamSearchDecoderOpModel : public SingleOpModel {
 public:
  CTCBeamSearchDecoderOpModel(std::initializer_list<int> input_shape,
                              std::initializer_list<int> sequence_length_shape,
                              int beam_width, int top_paths,
                              bool merge_repeated) {
    inputs_ = AddInput(TensorType_FLOAT32);
    sequence_length_ = AddInput(TensorType_INT32);

    // Per decoded path there are three int32 outputs (indices, values,
    // shape), followed by one final float tensor of log probabilities.
    for (int i = 0; i < top_paths * 3; ++i) {
      outputs_.push_back(AddOutput(TensorType_INT32));
    }
    outputs_.push_back(AddOutput(TensorType_FLOAT32));

    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      fbb.Int("beam_width", beam_width);
      fbb.Int("top_paths", top_paths);
      fbb.Bool("merge_repeated", merge_repeated);
    });
    fbb.Finish();
    SetCustomOp("CTCBeamSearchDecoder", fbb.GetBuffer(),
                Register_CTC_BEAM_SEARCH_DECODER);
    BuildInterpreter({input_shape, sequence_length_shape});
  }

  int inputs() { return inputs_; }

  int sequence_length() { return sequence_length_; }

  // Returns all decoded output tensors (everything except the trailing
  // log-probabilities tensor). Name kept as-is ("Outpus") because the tests
  // below call it; renaming would break callers.
  std::vector<std::vector<int>> GetDecodedOutpus() {
    std::vector<std::vector<int>> outputs;
    // `i + 1 < outputs_.size()` avoids both the signed/unsigned comparison
    // and the unsigned underflow `outputs_.size() - 1` would produce for an
    // empty vector.
    for (size_t i = 0; i + 1 < outputs_.size(); ++i) {
      outputs.push_back(ExtractVector<int>(outputs_[i]));
    }
    return outputs;
  }

  std::vector<float> GetLogProbabilitiesOutput() {
    return ExtractVector<float>(outputs_[outputs_.size() - 1]);
  }

  std::vector<std::vector<int>> GetOutputShapes() {
    std::vector<std::vector<int>> output_shapes;
    for (const int output : outputs_) {
      output_shapes.push_back(GetTensorShape(output));
    }
    return output_shapes;
  }

 private:
  int inputs_;
  int sequence_length_;
  std::vector<int> outputs_;
};
TEST(CTCBeamSearchTest, SimpleTest) {
  // Input shape {2, 1, 2}: 2 time steps, batch of 1, 2 classes
  // (1 label + blank); beam_width = 1, top_paths = 1, merge_repeated = true.
  CTCBeamSearchDecoderOpModel m({2, 1, 2}, {1}, 1, 1, true);
  m.PopulateTensor<float>(m.inputs(),
                          {-0.50922557, -1.35512652, -2.55445064, -1.58419356});
  m.PopulateTensor<int>(m.sequence_length(), {2});
  m.Invoke();

  // Make sure the output shapes are right.
  const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
  EXPECT_EQ(output_shapes.size(), 4);
  EXPECT_THAT(output_shapes[0], ElementsAre(1, 2));
  EXPECT_THAT(output_shapes[1], ElementsAre(1));
  EXPECT_THAT(output_shapes[2], ElementsAre(2));
  EXPECT_THAT(output_shapes[3], ElementsAre(1, 1));

  // Check decoded outputs (indices, values, dense shape).
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
  EXPECT_EQ(decoded_outputs.size(), 3);
  EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0));
  EXPECT_THAT(decoded_outputs[1], ElementsAre(0));
  EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 1));
  // Check log probabilities output.
  EXPECT_THAT(m.GetLogProbabilitiesOutput(),
              ElementsAreArray(ArrayFloatNear({-0.357094})));
}
TEST(CTCBeamSearchTest, MultiBatchTest) {
  // Input shape {3, 3, 3}: 3 time steps, batch of 3, 3 classes;
  // beam_width = 1, top_paths = 1, merge_repeated = true.
  CTCBeamSearchDecoderOpModel m({3, 3, 3}, {3}, 1, 1, true);
  m.PopulateTensor<float>(
      m.inputs(),
      {-0.63649208, -0.00487571, -0.04249819, -0.67754697, -1.0341399,
       -2.14717721, -0.77686821, -3.41973774, -0.05151402, -0.21482619,
       -0.57411168, -1.45039917, -0.73769373, -2.10941739, -0.44818325,
       -0.25287673, -2.80057302, -0.54748312, -0.73334867, -0.86537719,
       -0.2065197,  -0.18725838, -1.42770405, -0.86051965, -1.61642301,
       -2.07275114, -0.9201845});
  m.PopulateTensor<int>(m.sequence_length(), {3, 3, 3});
  m.Invoke();

  // Make sure the output shapes are right.
  const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
  EXPECT_EQ(output_shapes.size(), 4);
  EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
  EXPECT_THAT(output_shapes[1], ElementsAre(4));
  EXPECT_THAT(output_shapes[2], ElementsAre(2));
  EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));

  // Check decoded outputs (indices, values, dense shape).
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
  EXPECT_EQ(decoded_outputs.size(), 3);
  EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 2, 0));
  EXPECT_THAT(decoded_outputs[1], ElementsAre(1, 0, 0, 0));
  EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 2));
  // Check log probabilities output.
  EXPECT_THAT(m.GetLogProbabilitiesOutput(),
              ElementsAreArray(ArrayFloatNear({-1.88343, -1.41188, -1.20958})));
}
TEST(CTCBeamSearchTest, MultiPathsTest) {
  // Input shape {3, 2, 5}: 3 time steps, batch of 2, 5 classes;
  // beam_width = 3, top_paths = 2 — two decoded paths per batch element,
  // so there are 2*3 decoded outputs plus the log-probabilities tensor.
  CTCBeamSearchDecoderOpModel m({3, 2, 5}, {2}, 3, 2, true);
  m.PopulateTensor<float>(
      m.inputs(),
      {-2.206851,   -0.09542714, -0.2393415,  -3.81866197, -0.27241158,
       -0.20371124, -0.68236623, -1.1397166,  -0.17422639, -1.85224048,
       -0.9406037,  -0.32544678, -0.21846784, -0.38377237, -0.33498676,
       -0.10139782, -0.51886883, -0.21678554, -0.15267063, -1.91164412,
       -0.31328673, -0.27462716, -0.65975336, -1.53671973, -2.76554225,
       -0.23920634, -1.2370502,  -4.98751576, -3.12995717, -0.43129368});
  m.PopulateTensor<int>(m.sequence_length(), {3, 3});
  m.Invoke();

  // Make sure the output shapes are right.
  const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
  EXPECT_EQ(output_shapes.size(), 7);
  EXPECT_THAT(output_shapes[0], ElementsAre(4, 2));
  EXPECT_THAT(output_shapes[1], ElementsAre(3, 2));
  EXPECT_THAT(output_shapes[2], ElementsAre(4));
  EXPECT_THAT(output_shapes[3], ElementsAre(3));
  EXPECT_THAT(output_shapes[4], ElementsAre(2));
  EXPECT_THAT(output_shapes[5], ElementsAre(2));
  EXPECT_THAT(output_shapes[6], ElementsAre(2, 2));

  // Check decoded outputs (indices for both paths, then values, then shapes).
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
  EXPECT_EQ(decoded_outputs.size(), 6);
  EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 0, 1, 1, 0, 1, 1));
  EXPECT_THAT(decoded_outputs[1], ElementsAre(0, 0, 0, 1, 1, 0));
  EXPECT_THAT(decoded_outputs[2], ElementsAre(1, 2, 3, 0));
  EXPECT_THAT(decoded_outputs[3], ElementsAre(2, 1, 0));
  EXPECT_THAT(decoded_outputs[4], ElementsAre(2, 2));
  EXPECT_THAT(decoded_outputs[5], ElementsAre(2, 2));
  // Check log probabilities output.
  EXPECT_THAT(m.GetLogProbabilitiesOutput(),
              ElementsAreArray(
                  ArrayFloatNear({-2.65148, -2.65864, -2.17914, -2.61357})));
}
TEST(CTCBeamSearchTest, NonEqualSequencesTest) {
  // Input shape {3, 3, 4}: 3 time steps, batch of 3, 4 classes; the three
  // batch elements use different sequence lengths (1, 2, 3).
  CTCBeamSearchDecoderOpModel m({3, 3, 4}, {3}, 3, 1, true);
  m.PopulateTensor<float>(
      m.inputs(),
      {-1.26658163, -0.25760023, -0.03917975, -0.63772235, -0.03794756,
       -0.45063099, -0.27706473, -0.01569179, -0.59940385, -0.35700127,
       -0.48920721, -1.42635476, -1.3462478,  -0.02565498, -0.30179568,
       -0.6491698,  -0.55017719, -2.92291466, -0.92522973, -0.47592022,
       -0.07099135, -0.31575624, -0.86345281, -0.36017021, -0.79208612,
       -1.75306124, -0.65089224, -0.00912786, -0.42915003, -1.72606203,
       -1.66337589, -0.70800793, -2.52272352, -0.67329562, -2.49145522,
       -0.49786342});
  m.PopulateTensor<int>(m.sequence_length(), {1, 2, 3});
  m.Invoke();

  // Make sure the output shapes are right.
  const std::vector<std::vector<int>>& output_shapes = m.GetOutputShapes();
  EXPECT_EQ(output_shapes.size(), 4);
  EXPECT_THAT(output_shapes[0], ElementsAre(3, 2));
  EXPECT_THAT(output_shapes[1], ElementsAre(3));
  EXPECT_THAT(output_shapes[2], ElementsAre(2));
  EXPECT_THAT(output_shapes[3], ElementsAre(3, 1));

  // Check decoded outputs (indices, values, dense shape).
  const std::vector<std::vector<int>>& decoded_outputs = m.GetDecodedOutpus();
  EXPECT_EQ(decoded_outputs.size(), 3);
  EXPECT_THAT(decoded_outputs[0], ElementsAre(0, 0, 1, 0, 2, 0));
  EXPECT_THAT(decoded_outputs[1], ElementsAre(2, 0, 1));
  EXPECT_THAT(decoded_outputs[2], ElementsAre(3, 1));
  // Check log probabilities output.
  EXPECT_THAT(m.GetLogProbabilitiesOutput(),
              ElementsAreArray(ArrayFloatNear({-0.97322, -1.16334, -2.15553})));
}
} // namespace
} // namespace experimental
} // namespace ops
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_beam_search_decoder_test.cc | C++ | apache-2.0 | 9,108 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Copied from tensorflow/core/util/ctc/ctc_decoder.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_DECODER_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_DECODER_H_
#include <memory>
#include <vector>
#include "third_party/eigen3/Eigen/Core"
namespace tflite {
namespace experimental {
namespace ctc {
// The CTCDecoder is an abstract interface to be implemented when providing a
// decoding method on the timestep output of a RNN trained with CTC loss.
//
// The two types of decoding available are:
// - greedy path, through the CTCGreedyDecoder
// - beam search, through the CTCBeamSearchDecoder
// Abstract base for CTC decoders. Concrete decoders (greedy / beam search)
// implement Decode(); the last class index (num_classes - 1) is treated as
// the CTC blank label.
class CTCDecoder {
 public:
  typedef Eigen::Map<const Eigen::ArrayXi> SequenceLength;
  typedef Eigen::Map<const Eigen::MatrixXf> Input;
  typedef std::vector<std::vector<int>> Output;
  typedef Eigen::Map<Eigen::MatrixXf> ScoreOutput;

  // num_classes: number of output classes including the blank label.
  // batch_size: number of independent sequences decoded per call.
  // merge_repeated: if true, collapse consecutive identical labels.
  CTCDecoder(int num_classes, int batch_size, bool merge_repeated)
      : num_classes_(num_classes),
        blank_index_(num_classes - 1),  // blank is the last class by convention
        batch_size_(batch_size),
        merge_repeated_(merge_repeated) {}

  virtual ~CTCDecoder() {}

  // Dimensionality of the input/output is expected to be:
  //  - seq_len[b] - b = 0 to batch_size_
  //  - input[t].rows(b) - t = 0 to timesteps; b = 0 to batch_size_
  //  - output.size() specifies the number of beams to be returned.
  //  - scores(b, i) - b = 0 to batch_size; i = 0 to output.size()
  virtual bool Decode(const SequenceLength& seq_len,
                      const std::vector<Input>& input,
                      std::vector<Output>* output, ScoreOutput* scores) = 0;

  int batch_size() { return batch_size_; }
  int num_classes() { return num_classes_; }

 protected:
  int num_classes_;      // total classes, including blank
  int blank_index_;      // index of the CTC blank label (num_classes_ - 1)
  int batch_size_;       // sequences per Decode() call
  bool merge_repeated_;  // collapse consecutive repeats when decoding
};
// CTCGreedyDecoder is an implementation of the simple best path decoding
// algorithm, selecting at each timestep the most likely class at each timestep.
// CTCGreedyDecoder performs best-path decoding: at every timestep it keeps
// only the single most likely class, drops the blank label, and (optionally)
// collapses consecutive repeats of the same label.
class CTCGreedyDecoder : public CTCDecoder {
 public:
  CTCGreedyDecoder(int num_classes, int batch_size, bool merge_repeated)
      : CTCDecoder(num_classes, batch_size, merge_repeated) {}

  bool Decode(const CTCDecoder::SequenceLength& seq_len,
              const std::vector<CTCDecoder::Input>& input,
              std::vector<CTCDecoder::Output>* output,
              CTCDecoder::ScoreOutput* scores) override {
    // The caller must provide at least one beam with room for every batch
    // entry, and a score matrix with a row per entry and at least one column.
    if (output->empty() || (*output)[0].size() < batch_size_) {
      return false;
    }
    if (scores->rows() < batch_size_ || scores->cols() == 0) {
      return false;
    }
    for (int batch = 0; batch < batch_size_; ++batch) {
      const int num_steps = seq_len[batch];
      // Greedy decoding only ever writes beam 0.
      std::vector<int>& decoded = (*output)[0][batch];
      int previous_label = -1;
      (*scores)(batch, 0) = 0;
      for (int t = 0; t < num_steps; ++t) {
        int best_label;
        // maxCoeff returns the largest coefficient and stores its index;
        // accumulate the negative log-probability of the chosen path.
        (*scores)(batch, 0) -= input[t].row(batch).maxCoeff(&best_label);
        const bool is_merged_repeat =
            merge_repeated_ && best_label == previous_label;
        if (best_label != blank_index_ && !is_merged_repeat) {
          decoded.push_back(best_label);
        }
        previous_label = best_label;
      }
    }
    return true;
  }
};
} // namespace ctc
} // namespace experimental
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_DECODER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_decoder.h | C++ | apache-2.0 | 4,056 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Copied from tensorflow/core/util/ctc/ctc_loss_util.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
#include <cmath>
#include <limits>
namespace tflite {
namespace experimental {
namespace ctc {
// Sentinel log-probability representing probability zero: ln(0) = -infinity.
const float kLogZero = -std::numeric_limits<float>::infinity();

// Returns ln(exp(log_prob_1) + exp(log_prob_2)) for two log-probabilities,
// using the numerically stable form
//   ln(a + b) = ln(a) + ln(1 + exp(ln(b) - ln(a)))
// (GravesTh, Eq. 7.18). The exponent is always taken of the smaller-minus-
// larger difference so expf() cannot overflow.
inline float LogSumExp(float log_prob_1, float log_prob_2) {
  // Both inputs zero-probability: the sum is still zero probability.
  if (log_prob_1 == kLogZero && log_prob_2 == kLogZero) {
    return kLogZero;
  }
  const float larger = (log_prob_1 > log_prob_2) ? log_prob_1 : log_prob_2;
  const float smaller = (log_prob_1 > log_prob_2) ? log_prob_2 : log_prob_1;
  return larger + log1pf(expf(smaller - larger));
}
} // namespace ctc
} // namespace experimental
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_CTC_LOSS_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/ctc_loss_util.h | C++ | apache-2.0 | 1,781 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This simple class finds the top n elements of an incrementally provided set
// of elements which you push one at a time. If the number of elements exceeds
// n, the lowest elements are incrementally dropped. At the end you get
// a vector of the top elements sorted in descending order (through Extract() or
// ExtractNondestructive()), or a vector of the top elements but not sorted
// (through ExtractUnsorted() or ExtractUnsortedNondestructive()).
//
// The value n is specified in the constructor. If there are p elements pushed
// altogether:
// The total storage requirements are O(min(n, p)) elements
// The running time is O(p * log(min(n, p))) comparisons
// If n is a constant, the total storage required is a constant and the running
// time is linear in p.
//
// NOTE(zhifengc): There is a way to do this in O(min(n, p)) storage and O(p)
// runtime. The basic idea is to repeatedly fill up a buffer of 2 * n elements,
// discarding the lowest n elements whenever the buffer is full using a linear-
// time median algorithm. This may have better performance when the input
// sequence is partially sorted.
//
// NOTE(zhifengc): This class should be redesigned to avoid reallocating a
// vector for each Extract.
// Copied from tensorflow/core/lib/gtl/top_n.h
// TODO(b/111524997): Remove this file.
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_TOP_N_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_TOP_N_H_
#include <stddef.h>
#include <algorithm>
#include <functional>
#include <string>
#include <vector>
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace gtl {
// Cmp is an stl binary predicate. Note that Cmp is the "greater" predicate,
// not the more commonly used "less" predicate.
//
// If you use a "less" predicate here, the TopN will pick out the bottom N
// elements out of the ones passed to it, and it will return them sorted in
// ascending order.
//
// TopN is rule-of-zero copyable and movable if its members are.
// Incremental top-n selector; see the file-level comment for the full
// contract. With the default std::greater comparator it keeps the n largest
// elements, returned in descending order by the sorted Extract variants.
template <class T, class Cmp = std::greater<T> >
class TopN {
 public:
  // The TopN is in one of the three states:
  //
  //  o UNORDERED: this is the state an instance is originally in,
  //    where the elements are completely orderless.
  //
  //  o BOTTOM_KNOWN: in this state, we keep the invariant that there
  //    is at least one element in it, and the lowest element is at
  //    position 0. The elements in other positions remain
  //    unsorted. This state is reached if the state was originally
  //    UNORDERED and a peek_bottom() function call is invoked.
  //
  //  o HEAP_SORTED: in this state, the array is kept as a heap and
  //    there are exactly (limit_+1) elements in the array. This
  //    state is reached when at least (limit_+1) elements are
  //    pushed in.
  //
  // The state transition graph is as follows:
  //
  //             peek_bottom()                (limit_+1) elements
  //  UNORDERED --------------> BOTTOM_KNOWN --------------------> HEAP_SORTED
  //      |                                                           ^
  //      |                      (limit_+1) elements                  |
  //      +-----------------------------------------------------------+
  enum State { UNORDERED, BOTTOM_KNOWN, HEAP_SORTED };
  using UnsortedIterator = typename std::vector<T>::const_iterator;
  // 'limit' is the maximum number of top results to return.
  explicit TopN(size_t limit) : TopN(limit, Cmp()) {}
  TopN(size_t limit, const Cmp &cmp) : limit_(limit), cmp_(cmp) {}
  size_t limit() const { return limit_; }
  // Number of elements currently held by this TopN object.  This
  // will be no greater than 'limit' passed to the constructor.
  size_t size() const { return std::min(elements_.size(), limit_); }
  bool empty() const { return size() == 0; }
  // If you know how many elements you will push at the time you create the
  // TopN object, you can call reserve to preallocate the memory that TopN
  // will need to process all 'n' pushes.  Calling this method is optional.
  void reserve(size_t n) { elements_.reserve(std::min(n, limit_ + 1)); }
  // Push 'v'.  If the maximum number of elements was exceeded, drop the
  // lowest element and return it in 'dropped' (if given). If the maximum is not
  // exceeded, 'dropped' will remain unchanged. 'dropped' may be omitted or
  // nullptr, in which case it is not filled in.
  // Requires: T is CopyAssignable, Swappable
  void push(const T &v) { push(v, nullptr); }
  void push(const T &v, T *dropped) { PushInternal(v, dropped); }
  // Move overloads of push.
  // Requires: T is MoveAssignable, Swappable
  void push(T &&v) {  // NOLINT(build/c++11)
    push(std::move(v), nullptr);
  }
  void push(T &&v, T *dropped) {  // NOLINT(build/c++11)
    PushInternal(std::move(v), dropped);
  }
  // Peeks the bottom result without calling Extract().
  const T &peek_bottom();
  // Extract the elements as a vector sorted in descending order.  The caller
  // assumes ownership of the vector and must delete it when done.  This is a
  // destructive operation.  The only method that can be called immediately
  // after Extract() is Reset().
  std::vector<T> *Extract();
  // Similar to Extract(), but makes no guarantees the elements are in sorted
  // order.  As with Extract(), the caller assumes ownership of the vector and
  // must delete it when done.  This is a destructive operation.  The only
  // method that can be called immediately after ExtractUnsorted() is Reset().
  std::vector<T> *ExtractUnsorted();
  // A non-destructive version of Extract(). Copy the elements in a new vector
  // sorted in descending order and return it.  The caller assumes ownership of
  // the new vector and must delete it when done.  After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  std::vector<T> *ExtractNondestructive() const;
  // A non-destructive version of Extract(). Copy the elements to a given
  // vector sorted in descending order. After calling
  // ExtractNondestructive(), the caller can continue to push() new elements.
  // Note:
  //  1. The given argument must to be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractNondestructive(std::vector<T> *output) const;
  // A non-destructive version of ExtractUnsorted(). Copy the elements in a new
  // vector and return it, with no guarantees the elements are in sorted order.
  // The caller assumes ownership of the new vector and must delete it when
  // done.  After calling ExtractUnsortedNondestructive(), the caller can
  // continue to push() new elements.
  std::vector<T> *ExtractUnsortedNondestructive() const;
  // A non-destructive version of ExtractUnsorted(). Copy the elements into
  // a given vector, with no guarantees the elements are in sorted order.
  // After calling ExtractUnsortedNondestructive(), the caller can continue
  // to push() new elements.
  // Note:
  //  1. The given argument must to be allocated.
  //  2. Any data contained in the vector prior to the call will be deleted
  //     from it. After the call the vector will contain only the elements
  //     from the data structure.
  void ExtractUnsortedNondestructive(std::vector<T> *output) const;
  // Return an iterator to the beginning (end) of the container,
  // with no guarantees about the order of iteration. These iterators are
  // invalidated by mutation of the data structure.
  UnsortedIterator unsorted_begin() const { return elements_.begin(); }
  UnsortedIterator unsorted_end() const { return elements_.begin() + size(); }
  // Accessor for comparator template argument.
  Cmp *comparator() { return &cmp_; }
  // This removes all elements.  If Extract() or ExtractUnsorted() have been
  // called, this will put it back in an empty but useable state.
  void Reset();

 private:
  template <typename U>
  void PushInternal(U &&v, T *dropped);  // NOLINT(build/c++11)
  // elements_ can be in one of two states:
  //   elements_.size() <= limit_:  elements_ is an unsorted vector of elements
  //      pushed so far.
  //   elements_.size() > limit_:  The last element of elements_ is unused;
  //      the other elements of elements_ are an stl heap whose size is exactly
  //      limit_. In this case elements_.size() is exactly one greater than
  //      limit_, but don't use "elements_.size() == limit_ + 1" to check for
  //      that because you'll get a false positive if limit_ == size_t(-1).
  std::vector<T> elements_;
  size_t limit_;  // Maximum number of elements to find
  Cmp cmp_;       // Greater-than comparison function
  State state_ = UNORDERED;
};
// ----------------------------------------------------------------------
// Implementations of non-inline functions
// Core push logic shared by all push() overloads. Maintains the UNORDERED /
// BOTTOM_KNOWN / HEAP_SORTED state machine documented on the class; when an
// element is displaced (or rejected) it is returned through 'dropped'.
template <class T, class Cmp>
template <typename U>
void TopN<T, Cmp>::PushInternal(U &&v, T *dropped) {  // NOLINT(build/c++11)
  // With a zero limit nothing is ever kept; the value is dropped immediately.
  if (limit_ == 0) {
    if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    return;
  }
  if (state_ != HEAP_SORTED) {
    // Fewer than limit_+1 elements so far: append to the plain vector.
    elements_.push_back(std::forward<U>(v));  // NOLINT(build/c++11)
    if (state_ == UNORDERED || cmp_(elements_.back(), elements_.front())) {
      // Easy case: we just pushed the new element back
    } else {
      // To maintain the BOTTOM_KNOWN state, we need to make sure that
      // the element at position 0 is always the smallest. So we put
      // the new element at position 0 and push the original bottom
      // element in the back.
      // Warning: this code is subtle.
      using std::swap;
      swap(elements_.front(), elements_.back());
    }
    if (elements_.size() == limit_ + 1) {
      // Transition from unsorted vector to a heap.
      std::make_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
      state_ = HEAP_SORTED;
    }
  } else {
    // Only insert the new element if it is greater than the least element.
    if (cmp_(v, elements_.front())) {
      // The back slot is the scratch position left free by the last pop_heap.
      elements_.back() = std::forward<U>(v);  // NOLINT(build/c++11)
      std::push_heap(elements_.begin(), elements_.end(), cmp_);
      if (dropped) *dropped = std::move(elements_.front());
      std::pop_heap(elements_.begin(), elements_.end(), cmp_);
    } else {
      if (dropped) *dropped = std::forward<U>(v);  // NOLINT(build/c++11)
    }
  }
}
// Returns the lowest element without extracting it, transitioning from
// UNORDERED to BOTTOM_KNOWN on first use (the minimum is moved to slot 0).
// Requires a non-empty container.
template <class T, class Cmp>
const T &TopN<T, Cmp>::peek_bottom() {
  TFLITE_DCHECK(!empty());
  if (state_ == UNORDERED) {
    // We need to do a linear scan to find out the bottom element.
    // Use size_t to match std::vector::size_type: the previous 'int' index
    // narrowed the assignment from the size_t loop counter and triggered
    // signed/unsigned comparison warnings.
    size_t min_candidate = 0;
    for (size_t i = 1; i < elements_.size(); ++i) {
      if (cmp_(elements_[min_candidate], elements_[i])) {
        min_candidate = i;
      }
    }
    // By swapping the element at position 0 and the minimal
    // element, we transition to the BOTTOM_KNOWN state
    if (min_candidate != 0) {
      using std::swap;
      swap(elements_[0], elements_[min_candidate]);
    }
    state_ = BOTTOM_KNOWN;
  }
  return elements_.front();
}
// Moves the stored elements into a freshly allocated vector, sorted in
// descending order (with the default comparator). Destructive: only Reset()
// may be called afterwards. Caller owns the returned vector.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::Extract() {
  auto *result = new std::vector<T>;
  result->swap(elements_);
  if (state_ == HEAP_SORTED) {
    // The heap keeps one scratch slot past limit_; discard it, then unwind
    // the heap into fully sorted order.
    result->pop_back();
    std::sort_heap(result->begin(), result->end(), cmp_);
  } else {
    std::sort(result->begin(), result->end(), cmp_);
  }
  return result;
}
// Moves the stored elements into a freshly allocated vector without sorting.
// Destructive: only Reset() may be called afterwards. Caller owns the result.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsorted() {
  auto *result = new std::vector<T>;
  result->swap(elements_);
  if (state_ == HEAP_SORTED) {
    // Discard the scratch slot kept past the heap's limit_ elements.
    result->pop_back();
  }
  return result;
}
// Non-destructive Extract(): allocates a vector and delegates to the copying
// overload. Caller owns the returned vector.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractNondestructive() const {
  auto *result = new std::vector<T>;
  ExtractNondestructive(result);
  return result;
}
// Copies the stored elements into *output, sorted in descending order (with
// the default comparator); any prior contents of *output are replaced. The
// container itself is left untouched.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractNondestructive(std::vector<T> *output) const {
  TFLITE_DCHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    // Discard the copied scratch slot, then unwind the heap copy.
    output->pop_back();
    std::sort_heap(output->begin(), output->end(), cmp_);
  } else {
    std::sort(output->begin(), output->end(), cmp_);
  }
}
// Non-destructive ExtractUnsorted(): allocates a vector and delegates to the
// copying overload. Caller owns the returned vector.
template <class T, class Cmp>
std::vector<T> *TopN<T, Cmp>::ExtractUnsortedNondestructive() const {
  auto *result = new std::vector<T>;
  ExtractUnsortedNondestructive(result);
  return result;
}
// Copies the stored elements into *output with no ordering guarantee; any
// prior contents of *output are replaced. The container is left untouched.
template <class T, class Cmp>
void TopN<T, Cmp>::ExtractUnsortedNondestructive(std::vector<T> *output) const {
  TFLITE_DCHECK(output);
  *output = elements_;
  if (state_ == HEAP_SORTED) {
    // Discard the copied scratch slot kept past the heap's limit_ elements.
    output->pop_back();
  }
}
// Drops every element and returns to the initial orderless state, making the
// container usable again after a destructive Extract*() call.
template <class T, class Cmp>
void TopN<T, Cmp>::Reset() {
  state_ = UNORDERED;
  elements_.clear();
}
} // namespace gtl
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_KERNELS_TOP_N_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/kernels/top_n.h | C++ | apache-2.0 | 13,585 |
# TensorFlow ops for audio front-end processing.
load(
    "//tensorflow:tensorflow.bzl",
    "tf_copts",
    "tf_custom_op_library",
    "tf_gen_op_libs",
    "tf_gen_op_wrapper_py",
    "tf_opts_nortti_if_android",
    "tf_py_test",
)
load("//tensorflow:tensorflow.bzl", "tf_custom_op_py_library")

package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],
)

# TFLite kernel implementing the AUDIO_MICROFRONTEND custom op.
cc_library(
    name = "audio_microfrontend",
    srcs = ["audio_microfrontend.cc"],
    hdrs = ["audio_microfrontend.h"],
    deps = [
        "//tensorflow/lite:framework",
        "//tensorflow/lite/experimental/microfrontend/lib:frontend",
        "//tensorflow/lite/kernels:kernel_util",
        "//tensorflow/lite/kernels/internal:reference",
        "@flatbuffers",
    ],
)

# TensorFlow (non-Lite) op kernel for the same frontend, with Android-tuned
# compiler flags.
cc_library(
    name = "audio_microfrontend_op_lib",
    srcs = ["ops/audio_microfrontend_op.cc"],
    copts = tf_copts(android_optimization_level_override = None) + tf_opts_nortti_if_android() + [
        "-Wno-narrowing",
        "-Wno-sign-compare",
        "-Wno-overloaded-virtual",
    ] + select({
        "//tensorflow:android": [
            # Selective registration uses constexprs with recursive
            # string comparisons; that can lead to compiler errors, so
            # we increase the constexpr recursion depth.
            "-fconstexpr-depth=1024",
            "-Oz",
        ],
        "//conditions:default": [],
    }),
    deps = [
        "//tensorflow/lite/experimental/microfrontend/lib:frontend",
    ] + select({
        "//tensorflow:android": [
            "//tensorflow/core:portable_tensorflow_lib_lite",
        ],
        "//conditions:default": [
            "//tensorflow/core:framework",
            "//tensorflow/core:lib",
        ],
    }),
    alwayslink = 1,
)

# Unit test for the TFLite kernel.
cc_test(
    name = "audio_microfrontend_test",
    size = "small",
    srcs = ["audio_microfrontend_test.cc"],
    tags = ["tflite_not_portable_ios"],
    deps = [
        ":audio_microfrontend",
        "//tensorflow/lite:framework",
        "//tensorflow/lite/kernels:test_util",
        "@com_google_googletest//:gtest_main",
        "@flatbuffers",
    ],
)

# Shared library loaded by the Python wrapper below.
tf_custom_op_library(
    name = "python/ops/_audio_microfrontend_op.so",
    srcs = [
        "ops/audio_microfrontend_op.cc",
    ],
    deps = [
        "//tensorflow/lite/experimental/microfrontend/lib:frontend",
    ],
)

tf_gen_op_libs(
    op_lib_names = ["audio_microfrontend_op"],
    deps = [
        "//tensorflow/core:lib",
        "//tensorflow/lite/experimental/microfrontend/lib:frontend",
    ],
)

# Generated Python wrapper for the op.
tf_gen_op_wrapper_py(
    name = "audio_microfrontend_op",
    deps = [":audio_microfrontend_op_op_lib"],
)

# Python library bundling the wrapper and the custom-op shared object.
tf_custom_op_py_library(
    name = "audio_microfrontend_py",
    srcs = [
        "python/ops/audio_microfrontend_op.py",
    ],
    dso = [":python/ops/_audio_microfrontend_op.so"],
    kernels = [
        ":audio_microfrontend_op_op_lib",
    ],
    srcs_version = "PY3",
    deps = [
        ":audio_microfrontend_op",
        "//tensorflow/python:array_ops",
        "//tensorflow/python:client_testlib",
        "//tensorflow/python:constant_op",
        "//tensorflow/python:control_flow_ops",
        "//tensorflow/python:framework_for_generated_wrappers",
        "//tensorflow/python:linalg_ops",
        "//tensorflow/python:math_ops",
        "//tensorflow/python:platform",
        "//tensorflow/python:util",
    ],
)

# Python-level test of the op wrapper.
tf_py_test(
    name = "audio_microfrontend_op_test",
    size = "small",
    srcs = ["python/kernel_tests/audio_microfrontend_op_test.py"],
    tags = ["no_pip"],
    deps = [
        ":audio_microfrontend_py",
        "//tensorflow:tensorflow_py",
    ],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/BUILD | Starlark | apache-2.0 | 3,663 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/context.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace ops {
namespace custom {
namespace audio_microfrontend {
// Tensor indices for the single audio input and single feature output.
constexpr int kInputTensor = 0;
constexpr int kOutputTensor = 0;

// Per-node parameters, populated in Init() from the op's flexbuffer custom
// options and released in Free().
typedef struct {
  int sample_rate;      // from the "sample_rate" attribute
  FrontendState* state;  // microfrontend state, owned by this struct
  int left_context;     // frames of context prepended to each anchor frame
  int right_context;    // frames of context appended to each anchor frame
  int frame_stride;     // step (in frames) between output anchor frames
  bool zero_padding;    // pad out-of-range context with zeros vs. edge frames
  int out_scale;        // divisor applied to each output value
  bool out_float;       // emit float32 output instead of int32
} TfLiteAudioMicrofrontendParams;
// Parses the op's flexbuffer custom options, allocates and populates the
// frontend state, and returns the parameter struct stored in node->user_data.
// Ownership of the returned struct passes to the runtime, which hands it
// back to Free().
void* Init(TfLiteContext* context, const char* buffer, size_t length) {
  auto* data = new TfLiteAudioMicrofrontendParams;
  const uint8_t* buffer_t = reinterpret_cast<const uint8_t*>(buffer);
  const flexbuffers::Map& m = flexbuffers::GetRoot(buffer_t, length).AsMap();
  data->sample_rate = m["sample_rate"].AsInt32();
  // Translate the attribute map into the microfrontend configuration.
  struct FrontendConfig config;
  config.window.size_ms = m["window_size"].AsInt32();
  config.window.step_size_ms = m["window_step"].AsInt32();
  config.filterbank.num_channels = m["num_channels"].AsInt32();
  config.filterbank.upper_band_limit = m["upper_band_limit"].AsFloat();
  config.filterbank.lower_band_limit = m["lower_band_limit"].AsFloat();
  config.noise_reduction.smoothing_bits = m["smoothing_bits"].AsInt32();
  config.noise_reduction.even_smoothing = m["even_smoothing"].AsFloat();
  config.noise_reduction.odd_smoothing = m["odd_smoothing"].AsFloat();
  config.noise_reduction.min_signal_remaining =
      m["min_signal_remaining"].AsFloat();
  config.pcan_gain_control.enable_pcan = m["enable_pcan"].AsBool();
  config.pcan_gain_control.strength = m["pcan_strength"].AsFloat();
  config.pcan_gain_control.offset = m["pcan_offset"].AsFloat();
  config.pcan_gain_control.gain_bits = m["gain_bits"].AsInt32();
  config.log_scale.enable_log = m["enable_log"].AsBool();
  config.log_scale.scale_shift = m["scale_shift"].AsInt32();
  // Allocate and initialize the frontend state; released in Free().
  data->state = new FrontendState;
  FrontendPopulateState(&config, data->state, data->sample_rate);
  // Output layout attributes consumed by Prepare()/GenerateFeatures().
  data->left_context = m["left_context"].AsInt32();
  data->right_context = m["right_context"].AsInt32();
  data->frame_stride = m["frame_stride"].AsInt32();
  data->zero_padding = m["zero_padding"].AsBool();
  data->out_scale = m["out_scale"].AsInt32();
  data->out_float = m["out_float"].AsBool();
  return data;
}
// Releases everything allocated in Init(): first the frontend state's
// internal buffers, then the state object, then the parameter struct itself.
void Free(TfLiteContext* context, void* buffer) {
  auto* params = reinterpret_cast<TfLiteAudioMicrofrontendParams*>(buffer);
  FrontendFreeStateContents(params->state);
  delete params->state;
  delete params;
}
// Validates the single 1-D int16 audio input, selects the output type
// (float32 when "out_float" is set, int32 otherwise) and resizes the output
// to {num_frames, num_channels * (1 + left_context + right_context)}.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* data =
      reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 1);
  TF_LITE_ENSURE_EQ(context, input->type, kTfLiteInt16);
  output->type = kTfLiteInt32;
  if (data->out_float) {
    output->type = kTfLiteFloat32;
  }
  TfLiteIntArray* output_size = TfLiteIntArrayCreate(2);
  int num_frames = 0;
  // Frames are only produced once at least one full analysis window fits;
  // frame_stride thins the frames that are actually emitted.
  if (input->dims->data[0] >= data->state->window.size) {
    num_frames = (input->dims->data[0] - data->state->window.size) /
                     data->state->window.step / data->frame_stride +
                 1;
  }
  output_size->data[0] = num_frames;
  output_size->data[1] = data->state->filterbank.num_channels *
                         (1 + data->left_context + data->right_context);
  // ResizeTensor takes ownership of output_size.
  return context->ResizeTensor(context, output, output_size);
}
// Runs the microfrontend over the whole int16 audio buffer in `input` and
// writes filterbank features (scaled by 1/out_scale) into `output`, laying
// out every frame_stride-th anchor frame together with its left/right
// context. Out-of-range context frames are either zero-padded or clamped to
// the edge frames, per data->zero_padding.
//
// Fix: the local FrontendOutput was named `output`, shadowing the `output`
// tensor parameter; it is renamed frontend_output. C89-style loop index
// declarations are also moved into their loops. Behavior is unchanged.
template <typename T>
void GenerateFeatures(TfLiteAudioMicrofrontendParams* data,
                      const TfLiteTensor* input, TfLiteTensor* output) {
  const int16_t* audio_data = GetTensorData<int16_t>(input);
  int64_t audio_size = input->dims->data[0];
  T* filterbanks_flat = GetTensorData<T>(output);
  // Number of complete analysis windows at stride 1; frame_stride is applied
  // only when laying out the output below.
  int num_frames = 0;
  if (audio_size >= data->state->window.size) {
    num_frames = (input->dims->data[0] - data->state->window.size) /
                     data->state->window.step +
                 1;
  }
  // Feed the audio through the frontend, collecting one scaled feature
  // vector per produced frame.
  std::vector<std::vector<T>> frame_buffer(num_frames);
  int frame_index = 0;
  while (audio_size > 0) {
    size_t num_samples_read;
    struct FrontendOutput frontend_output = FrontendProcessSamples(
        data->state, audio_data, audio_size, &num_samples_read);
    audio_data += num_samples_read;
    audio_size -= num_samples_read;
    if (frontend_output.values != nullptr) {
      frame_buffer[frame_index].reserve(frontend_output.size);
      for (int i = 0; i < frontend_output.size; ++i) {
        frame_buffer[frame_index].push_back(
            static_cast<T>(frontend_output.values[i]) / data->out_scale);
      }
      ++frame_index;
    }
  }
  // Emit anchor frames (every frame_stride-th frame) with their context.
  int index = 0;
  std::vector<T> pad(data->state->filterbank.num_channels, 0);
  for (int anchor = 0; anchor < frame_buffer.size();
       anchor += data->frame_stride) {
    for (int frame = anchor - data->left_context;
         frame <= anchor + data->right_context; ++frame) {
      std::vector<T>* feature;
      if (data->zero_padding && (frame < 0 || frame >= frame_buffer.size())) {
        feature = &pad;
      } else if (frame < 0) {
        feature = &frame_buffer[0];
      } else if (frame >= frame_buffer.size()) {
        feature = &frame_buffer[frame_buffer.size() - 1];
      } else {
        feature = &frame_buffer[frame];
      }
      for (auto f : *feature) {
        filterbanks_flat[index++] = f;
      }
    }
  }
}
// Resets the frontend state and generates features for the entire input
// buffer, dispatching on the output element type chosen in Prepare().
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* data =
      reinterpret_cast<TfLiteAudioMicrofrontendParams*>(node->user_data);
  // Start from a clean state so repeated invocations are independent.
  FrontendReset(data->state);
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kOutputTensor, &output));
  if (data->out_float) {
    GenerateFeatures<float>(data, input, output);
  } else {
    GenerateFeatures<int32>(data, input, output);
  }
  return kTfLiteOk;
}
} // namespace audio_microfrontend
// Returns the registration wiring Init/Free/Prepare/Eval for the custom
// AUDIO_MICROFRONTEND op; the static lives for the process lifetime.
TfLiteRegistration* Register_AUDIO_MICROFRONTEND() {
  static TfLiteRegistration r = {
      audio_microfrontend::Init, audio_microfrontend::Free,
      audio_microfrontend::Prepare, audio_microfrontend::Eval};
  return &r;
}
} // namespace custom
} // namespace ops
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/audio_microfrontend.cc | C++ | apache-2.0 | 7,472 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_AUDIO_MICROFRONTEND_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_AUDIO_MICROFRONTEND_H_
#include "tensorflow/lite/context.h"
namespace tflite {
namespace ops {
namespace custom {
// Returns the kernel registration for the custom AUDIO_MICROFRONTEND op;
// implemented in audio_microfrontend.cc.
TfLiteRegistration* Register_AUDIO_MICROFRONTEND();
} // namespace custom
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_AUDIO_MICROFRONTEND_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/audio_microfrontend.h | C++ | apache-2.0 | 1,107 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Unit test for TFLite Micro Frontend op.
#include "tensorflow/lite/experimental/microfrontend/audio_microfrontend.h"
#include <memory>
#include <vector>
#include <gmock/gmock.h>
#include <gtest/gtest.h>
#include "flatbuffers/flexbuffers.h" // from @flatbuffers
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/test_util.h"
#include "tensorflow/lite/model.h"
namespace tflite {
namespace ops {
namespace custom {
namespace {
using ::testing::ElementsAreArray;
// Single-op test model wrapping the MICRO_FRONTEND custom op.
// The frontend configuration (sample rate, window, filterbank, noise
// reduction, PCAN, log scaling) is fixed in the flexbuffer options below;
// only the context/stride parameters vary per test.
class MicroFrontendOpModel : public SingleOpModel {
 public:
  MicroFrontendOpModel(int n_input, int n_frame, int n_frequency_per_frame,
                       int n_left_context, int n_right_context,
                       int n_frame_stride,
                       const std::vector<std::vector<int>>& input_shapes)
      : n_input_(n_input),
        n_frame_(n_frame),
        n_frequency_per_frame_(n_frequency_per_frame),
        n_left_context_(n_left_context),
        n_right_context_(n_right_context),
        n_frame_stride_(n_frame_stride) {
    // int16 audio samples in, int32 feature values out.
    input_ = AddInput(TensorType_INT16);
    output_ = AddOutput(TensorType_INT32);
    // Set up and pass in custom options using flexbuffer.
    flexbuffers::Builder fbb;
    fbb.Map([&]() {
      // Parameters to initialize FFT state.
      fbb.Int("sample_rate", 1000);
      fbb.Int("window_size", 25);
      fbb.Int("window_step", 10);
      fbb.Int("num_channels", 2);
      fbb.Float("upper_band_limit", 450.0);
      fbb.Float("lower_band_limit", 8.0);
      fbb.Int("smoothing_bits", 10);
      fbb.Float("even_smoothing", 0.025);
      fbb.Float("odd_smoothing", 0.06);
      fbb.Float("min_signal_remaining", 0.05);
      fbb.Bool("enable_pcan", true);
      fbb.Float("pcan_strength", 0.95);
      fbb.Float("pcan_offset", 80.0);
      fbb.Int("gain_bits", 21);
      fbb.Bool("enable_log", true);
      fbb.Int("scale_shift", 6);
      // Parameters for micro frontend.
      fbb.Int("left_context", n_left_context);
      fbb.Int("right_context", n_right_context);
      fbb.Int("frame_stride", n_frame_stride);
      fbb.Bool("zero_padding", true);
      fbb.Int("out_scale", 1);
      fbb.Bool("out_float", false);
    });
    fbb.Finish();
    SetCustomOp("MICRO_FRONTEND", fbb.GetBuffer(),
                Register_AUDIO_MICROFRONTEND);
    BuildInterpreter(input_shapes);
  }
  void SetInput(const std::vector<int16_t>& data) {
    PopulateTensor(input_, data);
  }
  std::vector<int> GetOutput() { return ExtractVector<int>(output_); }
  int num_inputs() { return n_input_; }
  // NOTE(review): "num_frmes" is a typo for "num_frames"; kept as-is because
  // VerifyGoldens() calls it by this name.
  int num_frmes() { return n_frame_; }
  int num_frequency_per_frame() { return n_frequency_per_frame_; }
  int num_left_context() { return n_left_context_; }
  int num_right_context() { return n_right_context_; }
  int num_frame_stride() { return n_frame_stride_; }
 protected:
  int input_;   // tensor index of the audio input
  int output_;  // tensor index of the feature output
  int n_input_;
  int n_frame_;
  int n_frequency_per_frame_;
  int n_left_context_;
  int n_right_context_;
  int n_frame_stride_;
};
// Shared fixture: holds the raw audio input and a golden-output comparator.
class BaseMicroFrontendTest : public ::testing::Test {
 protected:
  // Micro frontend input.
  std::vector<int16_t> micro_frontend_input_;
  // Compares output up to tolerance to the result of the micro_frontend given
  // the input.
  // NOTE(review): `tolerance` is currently unused — the comparison below is
  // an exact integer ElementsAreArray match.
  void VerifyGoldens(const std::vector<int16_t>& input,
                     const std::vector<std::vector<int>>& output,
                     MicroFrontendOpModel* micro_frontend,
                     float tolerance = 1e-5) {
    // Dimensionality check.
    const int num_inputs = micro_frontend->num_inputs();
    EXPECT_GT(num_inputs, 0);
    const int num_frames = micro_frontend->num_frmes();
    EXPECT_GT(num_frames, 0);
    EXPECT_EQ(num_frames, output.size());
    const int num_frequency_per_frame =
        micro_frontend->num_frequency_per_frame();
    EXPECT_GT(num_frequency_per_frame, 0);
    EXPECT_EQ(num_frequency_per_frame, output[0].size());
    // Set up input.
    micro_frontend->SetInput(input);
    // Call Invoke.
    micro_frontend->Invoke();
    // Mimic padding behaviour with zero_padding = true: for each anchor frame
    // (stepping by the frame stride) emit left_context + 1 + right_context
    // frames, substituting all-zero frames outside the valid range.
    std::vector<int> output_flattened;
    int anchor;
    for (anchor = 0; anchor < output.size();
         anchor += micro_frontend->num_frame_stride()) {
      int frame;
      for (frame = anchor - micro_frontend->num_left_context();
           frame <= anchor + micro_frontend->num_right_context(); ++frame) {
        if (frame < 0 || frame >= output.size()) {
          // Padding with zeros. (0.0 is implicitly converted to int 0 here.)
          int j;
          for (j = 0; j < num_frequency_per_frame; ++j) {
            output_flattened.push_back(0.0);
          }
        } else {
          // Copy real output.
          for (auto data_point : output[frame]) {
            output_flattened.push_back(data_point);
          }
        }
      }
    }
    // Validate result.
    EXPECT_THAT(micro_frontend->GetOutput(),
                ElementsAreArray(output_flattened));
  }
};  // class BaseMicroFrontendTest
// Fixture providing 36 samples of a full-scale alternating signal
// (0, +32767, 0, -32768, ...) as the frontend input.
class TwoConsecutive36InputsMicroFrontendTest : public BaseMicroFrontendTest {
  void SetUp() override {
    micro_frontend_input_ = {
        0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
        0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
        0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};
  }
};
// Black-box check: 36 input samples produce 2 frames of 2 channels each,
// verified against golden values with 1 frame of left/right context and
// stride 1.
TEST_F(TwoConsecutive36InputsMicroFrontendTest, MicroFrontendBlackBoxTest) {
  const int n_input = 36;
  const int n_frame = 2;
  const int n_frequency_per_frame = 2;
  MicroFrontendOpModel micro_frontend(n_input, n_frame, n_frequency_per_frame,
                                      1, 1, 1,
                                      {
                                          {n_input},
                                      });
  // Verify the final output.
  const std::vector<std::vector<int>> micro_frontend_golden_output = {
      {479, 425}, {436, 378}};
  // Fix: the source had been corrupted by HTML-entity decoding — the
  // "&micro" prefix of "&micro_frontend" had turned into the literal
  // character "µ". Restored the address-of argument.
  VerifyGoldens(micro_frontend_input_, micro_frontend_golden_output,
                &micro_frontend);
}
} // namespace
} // namespace custom
} // namespace ops
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/audio_microfrontend_test.cc | C++ | apache-2.0 | 6,702 |
# Library for generating feature vectors from audio data
#
# Dependency layering: :frontend -> {:window, :filterbank, :noise_reduction,
# :pcan_gain_control, :log_scale} -> :fft (kissfft-backed), with :bits as a
# shared leaf of bit-twiddling helpers.
package(
    default_visibility = ["//visibility:public"],
    licenses = ["notice"],
)
# Header-only bit-manipulation helpers (count-leading-zeros etc.).
cc_library(
    name = "bits",
    hdrs = ["bits.h"],
)
cc_library(
    name = "fft",
    srcs = [
        "fft.cc",
        "fft_util.cc",
    ],
    hdrs = [
        "fft.h",
        "fft_util.h",
    ],
    deps = [
        "@kissfft//:kiss_fftr_16",
    ],
)
cc_library(
    name = "filterbank",
    srcs = [
        "filterbank.c",
        "filterbank_util.c",
    ],
    hdrs = [
        "filterbank.h",
        "filterbank_util.h",
    ],
    deps = [
        ":bits",
        ":fft",
    ],
)
# Top-level feature-generation pipeline tying the stages together.
cc_library(
    name = "frontend",
    srcs = [
        "frontend.c",
        "frontend_util.c",
    ],
    hdrs = [
        "frontend.h",
        "frontend_util.h",
    ],
    deps = [
        ":bits",
        ":fft",
        ":filterbank",
        ":log_scale",
        ":noise_reduction",
        ":pcan_gain_control",
        ":window",
    ],
)
cc_library(
    name = "log_scale",
    srcs = [
        "log_lut.c",
        "log_scale.c",
        "log_scale_util.c",
    ],
    hdrs = [
        "log_lut.h",
        "log_scale.h",
        "log_scale_util.h",
    ],
    deps = [
        ":bits",
    ],
)
cc_library(
    name = "noise_reduction",
    srcs = [
        "noise_reduction.c",
        "noise_reduction_util.c",
    ],
    hdrs = [
        "noise_reduction.h",
        "noise_reduction_util.h",
    ],
)
cc_library(
    name = "pcan_gain_control",
    srcs = [
        "pcan_gain_control.c",
        "pcan_gain_control_util.c",
    ],
    hdrs = [
        "pcan_gain_control.h",
        "pcan_gain_control_util.h",
    ],
    deps = [
        ":bits",
    ],
)
cc_library(
    name = "window",
    srcs = [
        "window.c",
        "window_util.c",
    ],
    hdrs = [
        "window.h",
        "window_util.h",
    ],
)
# Per-stage unit tests, built on the micro test harness.
cc_test(
    name = "fft_test",
    srcs = ["fft_test.cc"],
    deps = [
        ":fft",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "filterbank_test",
    srcs = ["filterbank_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":filterbank",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "frontend_test",
    srcs = ["frontend_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":frontend",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "log_scale_test",
    srcs = ["log_scale_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":log_scale",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "noise_reduction_test",
    srcs = ["noise_reduction_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":noise_reduction",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "pcan_gain_control_test",
    srcs = ["pcan_gain_control_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":pcan_gain_control",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
cc_test(
    name = "window_test",
    srcs = ["window_test.cc"],
    # Setting copts for experimental code to [], but this code should be fixed
    # to build with the default copts (micro_copts())
    copts = [],
    deps = [
        ":window",
        "//tensorflow/lite/micro/testing:micro_test",
    ],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/BUILD | Starlark | apache-2.0 | 4,001 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
#ifdef __cplusplus
#include <cstdint>
extern "C" {
#else
// Fix: when consumed by a plain C translation unit, the fixed-width types
// below were previously only available if the includer happened to pull in
// <stdint.h> itself. Include it here so the header is self-contained in
// both C and C++.
#include <stdint.h>
#endif
// Portable fallback: leading-zero count of a 32-bit value (parameter is
// widened to uint64_t). Returns 32 for n == 0; only valid for n < 2^32.
static inline int CountLeadingZeros32Slow(uint64_t n) {
  int zeroes = 28;
  if (n >> 16) zeroes -= 16, n >>= 16;
  if (n >> 8) zeroes -= 8, n >>= 8;
  if (n >> 4) zeroes -= 4, n >>= 4;
  // After the shifts n < 16; the string literal is a 16-entry lookup table
  // of leading-zero counts for a 4-bit value (its trailing NUL is entry 15).
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
// Leading-zero count of a 32-bit value; 32 when n == 0.
static inline int CountLeadingZeros32(uint32_t n) {
#if defined(_MSC_VER)
  unsigned long result = 0;  // NOLINT(runtime/int)
  if (_BitScanReverse(&result, n)) {
    return 31 - result;
  }
  return 32;
#elif defined(__GNUC__)
  // Handle 0 as a special case because __builtin_clz(0) is undefined.
  if (n == 0) {
    return 32;
  }
  return __builtin_clz(n);
#else
  return CountLeadingZeros32Slow(n);
#endif
}
// 1-based index of the highest set bit; 0 when n == 0.
static inline int MostSignificantBit32(uint32_t n) {
  return 32 - CountLeadingZeros32(n);
}
// Portable fallback: leading-zero count of a 64-bit value; 64 when n == 0.
static inline int CountLeadingZeros64Slow(uint64_t n) {
  int zeroes = 60;
  if (n >> 32) zeroes -= 32, n >>= 32;
  if (n >> 16) zeroes -= 16, n >>= 16;
  if (n >> 8) zeroes -= 8, n >>= 8;
  if (n >> 4) zeroes -= 4, n >>= 4;
  return "\4\3\2\2\1\1\1\1\0\0\0\0\0\0\0"[n] + zeroes;
}
// Leading-zero count of a 64-bit value; 64 when n == 0.
static inline int CountLeadingZeros64(uint64_t n) {
#if defined(_MSC_VER) && defined(_M_X64)
  // MSVC does not have __builtin_clzll. Use _BitScanReverse64.
  unsigned long result = 0;  // NOLINT(runtime/int)
  if (_BitScanReverse64(&result, n)) {
    return 63 - result;
  }
  return 64;
#elif defined(_MSC_VER)
  // MSVC does not have __builtin_clzll. Compose two calls to _BitScanReverse
  unsigned long result = 0;  // NOLINT(runtime/int)
  if ((n >> 32) && _BitScanReverse(&result, n >> 32)) {
    return 31 - result;
  }
  if (_BitScanReverse(&result, n)) {
    return 63 - result;
  }
  return 64;
#elif defined(__GNUC__)
  // Handle 0 as a special case because __builtin_clzll(0) is undefined.
  if (n == 0) {
    return 64;
  }
  return __builtin_clzll(n);
#else
  return CountLeadingZeros64Slow(n);
#endif
}
// 1-based index of the highest set bit; 0 when n == 0.
static inline int MostSignificantBit64(uint64_t n) {
  return 64 - CountLeadingZeros64(n);
}
#ifdef __cplusplus
}  // extern "C"
#endif
#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_BITS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/bits.h | C++ | apache-2.0 | 2,916 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#include <string.h>
#define FIXED_POINT 16
#include "kiss_fft.h"
#include "tools/kiss_fftr.h"
// Runs a real FFT over state->input_size samples of `input`: scales each
// sample up by input_scale_shift bits, zero-pads the rest of the
// power-of-two fft_size buffer, and writes fft_size / 2 + 1 complex bins to
// state->output.
void FftCompute(struct FftState* state, const int16_t* input,
                int input_scale_shift) {
  const size_t input_size = state->input_size;
  const size_t fft_size = state->fft_size;
  int16_t* fft_input = state->input;
  // First, scale the input by the given shift.
  size_t i;
  for (i = 0; i < input_size; ++i) {
    // Shift in the unsigned domain: left-shifting a negative int16_t
    // directly would be undefined behavior.
    fft_input[i] = static_cast<int16_t>(static_cast<uint16_t>(input[i])
                                        << input_scale_shift);
  }
  // Zero out whatever else remains in the top part of the input.
  for (; i < fft_size; ++i) {
    fft_input[i] = 0;
  }
  // Apply the FFT using the kissfft configuration that FftPopulateState laid
  // out inside the scratch buffer.
  kiss_fftr(reinterpret_cast<kiss_fftr_cfg>(state->scratch),
            state->input,
            reinterpret_cast<kiss_fft_cpx*>(state->output));
}
// Intentionally a no-op: the state is fully set up by FftPopulateState().
void FftInit(struct FftState* state) {
  // All the initialization is done in FftPopulateState()
}
// Clears the FFT buffers: all fft_size input samples and the
// fft_size / 2 + 1 complex output bins.
void FftReset(struct FftState* state) {
  const size_t input_bytes = state->fft_size * sizeof(*state->input);
  const size_t output_bytes =
      (state->fft_size / 2 + 1) * sizeof(*state->output);
  memset(state->input, 0, input_bytes);
  memset(state->output, 0, output_bytes);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft.cc | C++ | apache-2.0 | 1,881 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
#include <stdint.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
// One complex FFT bin in 16-bit fixed point.
struct complex_int16_t {
  int16_t real;
  int16_t imag;
};
// Buffers and bookkeeping for one FFT instance. Allocated by
// FftPopulateState() (fft_util.h) and released by FftFreeStateContents().
struct FftState {
  int16_t* input;                  // fft_size samples; tail is zero padding
  struct complex_int16_t* output;  // fft_size / 2 + 1 bins
  size_t fft_size;                 // power of two >= input_size
  size_t input_size;               // valid samples consumed per FftCompute
  void* scratch;                   // kissfft configuration / work area
  size_t scratch_size;             // byte size of scratch
};
// Scales `input` up by input_scale_shift bits, zero-pads to fft_size, and
// computes the real FFT into state->output.
void FftCompute(struct FftState* state, const int16_t* input,
                int input_scale_shift);
// No-op; all setup happens in FftPopulateState().
void FftInit(struct FftState* state);
// Zeroes the input and output buffers.
void FftReset(struct FftState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft.h | C | apache-2.0 | 1,381 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft_io.h"
// Emits, as C source on `fp`, the static buffer definitions (input, output,
// scratch) sized to hold the contents of `state`.
void FftWriteMemmapPreamble(FILE* fp, const struct FftState* state) {
  fprintf(fp, "static int16_t fft_input[%zu];\n", state->fft_size);
  fprintf(fp, "static struct complex_int16_t fft_output[%zu];\n",
          state->fft_size / 2 + 1);
  fprintf(fp, "static char fft_scratch[%zu];\n", state->scratch_size);
  fprintf(fp, "\n");
}
// Emits assignments wiring the buffers from FftWriteMemmapPreamble() into
// the FftState pointed to by `variable` (a pointer expression in the
// generated code).
void FftWriteMemmap(FILE* fp, const struct FftState* state,
                    const char* variable) {
  fprintf(fp, "%s->input = fft_input;\n", variable);
  fprintf(fp, "%s->output = fft_output;\n", variable);
  fprintf(fp, "%s->fft_size = %zu;\n", variable, state->fft_size);
  fprintf(fp, "%s->input_size = %zu;\n", variable, state->input_size);
  fprintf(fp, "%s->scratch = fft_scratch;\n", variable);
  fprintf(fp, "%s->scratch_size = %zu;\n", variable, state->scratch_size);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft_io.c | C | apache-2.0 | 1,556 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#ifdef __cplusplus
extern "C" {
#endif
// Writes static buffer definitions for `state` as C source to fp.
void FftWriteMemmapPreamble(FILE* fp, const struct FftState* state);
// Writes assignments that point `variable` at those buffers.
void FftWriteMemmap(FILE* fp, const struct FftState* state,
                    const char* variable);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft_io.h | C | apache-2.0 | 1,210 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {
// Fixed 25-sample test input (zeros interleaved with signed peaks).
const int16_t kFakeWindow[] = {
    0, 1151, 0, -5944, 0, 13311, 0, -21448, 0, 28327, 0, -32256, 0, 32255,
    0, -28328, 0, 21447, 0, -13312, 0, 5943, 0, -1152, 0};
const int kScaleShift = 0;  // no pre-scaling of the input
}  // namespace
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(FftTest_CheckOutputValues) {
  struct FftState state;
  // 25 samples round up to a 32-point FFT, so 17 output bins are expected.
  TF_LITE_MICRO_EXPECT(
      FftPopulateState(&state, sizeof(kFakeWindow) / sizeof(kFakeWindow[0])));
  FftInit(&state);
  FftCompute(&state, kFakeWindow, kScaleShift);
  const struct complex_int16_t expected[] = {
      {0, 0}, {-10, 9}, {-20, 0}, {-9, -10}, {0, 25}, {-119, 119},
      {-887, 0}, {3000, 3000}, {0, -6401}, {-3000, 3000}, {886, 0}, {118, 119},
      {0, 25}, {9, -10}, {19, 0}, {9, 9}, {0, 0}};
  TF_LITE_MICRO_EXPECT_EQ(state.fft_size / 2 + 1,
                          sizeof(expected) / sizeof(expected[0]));
  unsigned int i;
  for (i = 0; i <= state.fft_size / 2; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.output[i].real, expected[i].real);
    TF_LITE_MICRO_EXPECT_EQ(state.output[i].imag, expected[i].imag);
  }
  FftFreeStateContents(&state);
}
TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft_test.cc | C++ | apache-2.0 | 2,016 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
#include <stdio.h>
#define FIXED_POINT 16
#include "kiss_fft.h"
#include "tools/kiss_fftr.h"
// Releases any buffers already attached to `state` and nulls the pointers.
// Used on FftPopulateState failure paths so the state is always safe to pass
// to FftFreeStateContents (or to a retry) afterwards.
static void FreePartialFftState(struct FftState* state) {
  free(state->scratch);
  state->scratch = nullptr;
  state->scratch_size = 0;
  free(state->output);
  state->output = nullptr;
  free(state->input);
  state->input = nullptr;
}

// Sizes and allocates the FFT buffers for `input_size` samples, rounding the
// FFT length up to the next power of two, and lets kissfft lay out its
// configuration inside the scratch buffer.
// Returns 1 on success and 0 on failure. Fix over the original: the pointer
// fields are zero-initialized up front and partial allocations are released
// on failure, so a failed call no longer leaks and no longer leaves
// uninitialized pointers for FftFreeStateContents to free.
int FftPopulateState(struct FftState* state, size_t input_size) {
  state->input = nullptr;
  state->output = nullptr;
  state->scratch = nullptr;
  state->scratch_size = 0;
  state->input_size = input_size;
  state->fft_size = 1;
  while (state->fft_size < state->input_size) {
    state->fft_size <<= 1;
  }
  state->input = reinterpret_cast<int16_t*>(
      malloc(state->fft_size * sizeof(*state->input)));
  if (state->input == nullptr) {
    fprintf(stderr, "Failed to alloc fft input buffer\n");
    return 0;
  }
  state->output = reinterpret_cast<complex_int16_t*>(
      malloc((state->fft_size / 2 + 1) * sizeof(*state->output) * 2));
  if (state->output == nullptr) {
    fprintf(stderr, "Failed to alloc fft output buffer\n");
    FreePartialFftState(state);
    return 0;
  }
  // Ask kissfft how much memory it wants.
  size_t scratch_size = 0;
  kiss_fftr_cfg kfft_cfg = kiss_fftr_alloc(
      state->fft_size, 0, nullptr, &scratch_size);
  // With a null buffer, kiss_fftr_alloc is a pure size query and must return
  // null; a non-null result means the sizing call misbehaved.
  if (kfft_cfg != nullptr) {
    fprintf(stderr, "Kiss memory sizing failed.\n");
    FreePartialFftState(state);
    return 0;
  }
  state->scratch = malloc(scratch_size);
  if (state->scratch == nullptr) {
    fprintf(stderr, "Failed to alloc fft scratch buffer\n");
    FreePartialFftState(state);
    return 0;
  }
  state->scratch_size = scratch_size;
  // Let kissfft configure the scratch space we just allocated
  kfft_cfg = kiss_fftr_alloc(state->fft_size, 0,
                             state->scratch, &scratch_size);
  if (kfft_cfg != state->scratch) {
    fprintf(stderr, "Kiss memory preallocation strategy failed.\n");
    FreePartialFftState(state);
    return 0;
  }
  return 1;
}
// Frees the buffers allocated by FftPopulateState. The pointers are nulled
// after freeing so a second call — or a call after a failed populate — is a
// harmless no-op (free(nullptr) does nothing) instead of a double free.
void FftFreeStateContents(struct FftState* state) {
  free(state->input);
  state->input = nullptr;
  free(state->output);
  state->output = nullptr;
  free(state->scratch);
  state->scratch = nullptr;
  state->scratch_size = 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft_util.cc | C++ | apache-2.0 | 2,396 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#ifdef __cplusplus
extern "C" {
#endif
// Prepares an FFT for the given input size.
// Returns 1 on success, 0 on allocation failure.
int FftPopulateState(struct FftState* state, size_t input_size);
// Frees any allocated buffers.
void FftFreeStateContents(struct FftState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FFT_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/fft_util.h | C | apache-2.0 | 1,219 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
#include <string.h>
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
// Writes the squared magnitude (energy) of each FFT bin in
// [start_index, end_index) into the matching slots of `energy`.
// Bins outside that range are left untouched.
void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
                                         struct complex_int16_t* fft_output,
                                         int32_t* energy) {
  const int end_index = state->end_index;
  int i;
  // Both cursors start at the first bin the filterbank actually uses.
  energy += state->start_index;
  fft_output += state->start_index;
  for (i = state->start_index; i < end_index; ++i) {
    const int32_t real = fft_output->real;
    const int32_t imag = fft_output->imag;
    fft_output++;
    // NOTE(review): real*real + imag*imag is evaluated in signed int
    // arithmetic; with both components at -32768 the sum is 2^31, which
    // would overflow — presumably unreachable for real FFT output, but
    // worth confirming.
    const uint32_t mag_squared = (real * real) + (imag * imag);
    *energy++ = mag_squared;
  }
}
// Accumulates FFT bin energies into num_channels + 1 weighted sums in
// state->work. Each channel segment contributes a "weights" sum to the
// current channel and an "unweights" sum that is carried forward as the
// starting accumulator of the next channel — which appears to implement
// overlapping (triangular) filters, with each segment shared by two
// adjacent channels. The statement order at the end of the loop (store,
// hand off, clear) is what realizes this carry-over; do not reorder.
// Results stay in state->work until FilterbankSqrt() consumes them.
void FilterbankAccumulateChannels(struct FilterbankState* state,
                                  const int32_t* energy) {
  uint64_t* work = state->work;
  uint64_t weight_accumulator = 0;
  uint64_t unweight_accumulator = 0;
  const int16_t* channel_frequency_starts = state->channel_frequency_starts;
  const int16_t* channel_weight_starts = state->channel_weight_starts;
  const int16_t* channel_widths = state->channel_widths;
  int num_channels_plus_1 = state->num_channels + 1;
  int i;
  for (i = 0; i < num_channels_plus_1; ++i) {
    // Per-channel table lookups: where its bins start, where its weight
    // tables start, and how many bins it spans.
    const int32_t* magnitudes = energy + *channel_frequency_starts++;
    const int16_t* weights = state->weights + *channel_weight_starts;
    const int16_t* unweights = state->unweights + *channel_weight_starts++;
    const int width = *channel_widths++;
    int j;
    for (j = 0; j < width; ++j) {
      weight_accumulator += *weights++ * ((uint64_t)*magnitudes);
      unweight_accumulator += *unweights++ * ((uint64_t)*magnitudes);
      ++magnitudes;
    }
    // Store the finished channel, then seed the next channel's accumulator
    // with this segment's unweighted sum.
    *work++ = weight_accumulator;
    weight_accumulator = unweight_accumulator;
    unweight_accumulator = 0;
  }
}
// Integer square root of a 32-bit value using the digit-by-digit (binary
// restoring) method, with a final +1 rounding step when the remainder shows
// the true root is past the computed one (capped at 0xFFFF).
static uint16_t Sqrt32(uint32_t num) {
  if (num == 0) {
    return 0;
  }
  // Start the trial bit at the highest odd position at or above the most
  // significant set bit of num.
  int leading = 32 - MostSignificantBit32(num);
  leading |= 1;
  uint32_t trial_bit = 1U << (31 - leading);
  uint32_t root = 0;
  const int steps = (31 - leading) / 2 + 1;
  for (int step = 0; step < steps; ++step) {
    const uint32_t candidate = root + trial_bit;
    if (num >= candidate) {
      num -= candidate;
      root = (root >> 1U) + trial_bit;
    } else {
      root >>= 1U;
    }
    trial_bit >>= 2U;
  }
  // Do rounding - if we have the bits.
  if (num > root && root != 0xFFFF) {
    ++root;
  }
  return root;
}
// Integer square root of a 64-bit value; same digit-by-digit method as
// Sqrt32 above, widened to 64 bits.
static uint32_t Sqrt64(uint64_t num) {
  // Take a shortcut and just use 32 bit operations if the upper word is all
  // clear. This will cause a slight off by one issue for numbers close to 2^32,
  // but it probably isn't going to matter (and gives us a big performance win).
  if ((num >> 32) == 0) {
    return Sqrt32((uint32_t)num);
  }
  uint64_t res = 0;
  // Start the trial bit at the highest odd position at or above the most
  // significant set bit of num.
  int max_bit_number = 64 - MostSignificantBit64(num);
  max_bit_number |= 1;
  uint64_t bit = 1ULL << (63 - max_bit_number);
  int iterations = (63 - max_bit_number) / 2 + 1;
  while (iterations--) {
    if (num >= res + bit) {
      num -= res + bit;
      res = (res >> 1U) + bit;
    } else {
      res >>= 1U;
    }
    bit >>= 2U;
  }
  // Do rounding - if we have the bits.
  if (num > res && res != 0xFFFFFFFFLL) {
    ++res;
  }
  return res;
}
// Square-roots the 64-bit channel accumulators and shifts them down by
// scale_down_shift. The work buffer is reused in place for the 32-bit
// results, so they are only valid until the next
// FilterbankAccumulateChannels() call.
uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift) {
  // Skip work[0]: FilterbankAccumulateChannels stores num_channels + 1
  // entries and the per-channel results start at index 1.
  const uint64_t* accumulated = state->work + 1;
  uint32_t* results = (uint32_t*)state->work;
  const int num_channels = state->num_channels;
  for (int channel = 0; channel < num_channels; ++channel) {
    results[channel] = Sqrt64(accumulated[channel]) >> scale_down_shift;
  }
  return results;
}
// Clears all channel accumulators, including the extra leading work slot.
void FilterbankReset(struct FilterbankState* state) {
  const size_t work_bytes =
      (state->num_channels + 1) * sizeof(*state->work);
  memset(state->work, 0, work_bytes);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank.c | C | apache-2.0 | 4,482 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
#include <stdint.h>
#include <stdlib.h>
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#define kFilterbankBits 12
#ifdef __cplusplus
extern "C" {
#endif
struct FilterbankState {
int num_channels;
int start_index;
int end_index;
int16_t* channel_frequency_starts;
int16_t* channel_weight_starts;
int16_t* channel_widths;
int16_t* weights;
int16_t* unweights;
uint64_t* work;
};
// Converts the relevant complex values of an FFT output into energy (the
// square magnitude).
void FilterbankConvertFftComplexToEnergy(struct FilterbankState* state,
struct complex_int16_t* fft_output,
int32_t* energy);
// Computes the mel-scale filterbank on the given energy array. Output is cached
// internally - to fetch it, you need to call FilterbankSqrt.
void FilterbankAccumulateChannels(struct FilterbankState* state,
const int32_t* energy);
// Applies an integer square root to the 64 bit intermediate values of the
// filterbank, and returns a pointer to them. Memory will be invalidated the
// next time FilterbankAccumulateChannels is called.
uint32_t* FilterbankSqrt(struct FilterbankState* state, int scale_down_shift);
void FilterbankReset(struct FilterbankState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank.h | C | apache-2.0 | 2,252 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h"
// Writes `values` to fp as a C array initializer named filterbank_<name>,
// e.g.  static int16_t filterbank_foo[] = {1, 2, 3};
static void PrintArray(FILE* fp, const char* name, const int16_t* values,
                       size_t size) {
  fprintf(fp, "static int16_t filterbank_%s[] = {", name);
  // Fix: index with size_t instead of int — the old `i < size` mixed signed
  // and unsigned operands (and `size - 1` wraps for size == 0; the i + 1
  // comparison below is wrap-free).
  size_t i;
  for (i = 0; i < size; ++i) {
    fprintf(fp, "%d", values[i]);
    if (i + 1 < size) {
      fprintf(fp, ", ");
    }
  }
  fprintf(fp, "};\n");
}
// Emits the static tables (frequency starts, weight starts, widths, weights,
// unweights) and the 64-bit work buffer needed to memory-map a
// FilterbankState, as C source on `fp`.
void FilterbankWriteMemmapPreamble(FILE* fp,
                                   const struct FilterbankState* state) {
  const int num_channels_plus_1 = state->num_channels + 1;
  PrintArray(fp, "channel_frequency_starts", state->channel_frequency_starts,
             num_channels_plus_1);
  PrintArray(fp, "channel_weight_starts", state->channel_weight_starts,
             num_channels_plus_1);
  PrintArray(fp, "channel_widths", state->channel_widths, num_channels_plus_1);
  // The weight/unweight tables hold one entry per bin of every channel, so
  // their length is the sum of all channel widths.
  int num_weights = 0;
  int i;
  for (i = 0; i < num_channels_plus_1; ++i) {
    num_weights += state->channel_widths[i];
  }
  PrintArray(fp, "weights", state->weights, num_weights);
  PrintArray(fp, "unweights", state->unweights, num_weights);
  fprintf(fp, "static uint64_t filterbank_work[%d];\n", num_channels_plus_1);
  fprintf(fp, "\n");
}
// Emits assignments wiring the tables from FilterbankWriteMemmapPreamble()
// into the FilterbankState named by `variable` (a pointer expression in the
// generated code).
void FilterbankWriteMemmap(FILE* fp, const struct FilterbankState* state,
                           const char* variable) {
  fprintf(fp, "%s->num_channels = %d;\n", variable, state->num_channels);
  fprintf(fp, "%s->start_index = %d;\n", variable, state->start_index);
  fprintf(fp, "%s->end_index = %d;\n", variable, state->end_index);
  fprintf(
      fp,
      "%s->channel_frequency_starts = filterbank_channel_frequency_starts;\n",
      variable);
  fprintf(fp, "%s->channel_weight_starts = filterbank_channel_weight_starts;\n",
          variable);
  fprintf(fp, "%s->channel_widths = filterbank_channel_widths;\n", variable);
  fprintf(fp, "%s->weights = filterbank_weights;\n", variable);
  fprintf(fp, "%s->unweights = filterbank_unweights;\n", variable);
  fprintf(fp, "%s->work = filterbank_work;\n", variable);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.c | C | apache-2.0 | 2,732 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
#ifdef __cplusplus
extern "C" {
#endif
void FilterbankWriteMemmapPreamble(FILE* fp,
const struct FilterbankState* state);
void FilterbankWriteMemmap(FILE* fp, const struct FilterbankState* state,
const char* variable);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h | C | apache-2.0 | 1,308 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
#include <cstring>
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {
// Deliberately small fixture so expected values can be checked by hand:
// 1 kHz sample rate with a 17-bin spectrum.
const int kSampleRate = 1000;
const int kSpectrumSize = 17;
// Spectrum bins [kStartIndex, kEndIndex) are the range the filterbank reads.
const int kStartIndex = 1;
const int kEndIndex = 15;
// Used both as the expected output of FilterbankConvertFftComplexToEnergy and
// as the input to FilterbankAccumulateChannels below.
const int32_t kEnergy[] = {-1, 181, 400, 181, 625, 28322,
                           786769, 18000000, 40972801, 18000000, 784996, 28085,
                           625, 181, 361, -1, -1};
// Expected per-channel accumulator contents after accumulating kEnergy.
const uint64_t kWork[] = {1835887, 61162970173, 258694800000};
// No extra output scaling is applied in these tests.
const int kScaleShift = 0;
// Test filterbank generation using scaled-down defaults.
class FilterbankTestConfig {
 public:
  FilterbankTestConfig() {
    config_.num_channels = 2;
    config_.lower_band_limit = 8.0;
    config_.upper_band_limit = 450.0;
  }
  struct FilterbankConfig config_;
};
}  // namespace
TF_LITE_MICRO_TESTS_BEGIN
TF_LITE_MICRO_TEST(FilterbankTest_CheckStartIndex) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // First spectrum bin the filterbank consumes; bin 0 (DC) is always skipped.
  TF_LITE_MICRO_EXPECT_EQ(state.start_index, kStartIndex);
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckEndIndex) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Index just past the last spectrum bin the filterbank uses.
  TF_LITE_MICRO_EXPECT_EQ(state.end_index, kEndIndex);
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelFrequencyStarts) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Aligned first spectrum index per channel; the extra (+1) entry covers the
  // right edge of the last channel.
  const int16_t expected[] = {0, 4, 8};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i <= state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.channel_frequency_starts[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWeightStarts) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Offset of each channel's weights inside the packed weight arrays.
  const int16_t expected[] = {0, 8, 16};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i <= state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckChannelWidths) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Block-padded weight count per channel (multiples of the channel block
  // size used by the accumulation code).
  const int16_t expected[] = {8, 8, 8};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i <= state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.channel_widths[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckWeights) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Quantized filter weights, padded to each channel's block-aligned width.
  const int16_t expected[] = {0, 3277, 2217, 1200, 222, 0, 0, 0,
                              0, 3376, 2468, 1591, 744, 0, 0, 0,
                              0, 4020, 3226, 2456, 1708, 983, 277, 0};
  const size_t expected_size = sizeof(expected) / sizeof(expected[0]);
  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
                              state.channel_widths[state.num_channels],
                          expected_size);
  // size_t index: the old `int i` compared against a sizeof() expression,
  // which is a signed/unsigned comparison (-Wsign-compare).
  for (size_t i = 0; i < expected_size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.weights[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckUnweights) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Complementary (1 - weight) quantized values, padded like the weights.
  const int16_t expected[] = {0, 819, 1879, 2896, 3874, 0, 0, 0,
                              0, 720, 1628, 2505, 3352, 0, 0, 0,
                              0, 76, 870, 1640, 2388, 3113, 3819, 0};
  const size_t expected_size = sizeof(expected) / sizeof(expected[0]);
  TF_LITE_MICRO_EXPECT_EQ(state.channel_weight_starts[state.num_channels] +
                              state.channel_widths[state.num_channels],
                          expected_size);
  // size_t index: the old `int i` compared against a sizeof() expression,
  // which is a signed/unsigned comparison (-Wsign-compare).
  for (size_t i = 0; i < expected_size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.unweights[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckConvertFftComplexToEnergy) {
  struct FilterbankState state;
  state.start_index = kStartIndex;
  state.end_index = kEndIndex;
  // Complex bins chosen so re^2 + im^2 reproduces kEnergy inside
  // [start_index, end_index); entries outside that range are not checked.
  struct complex_int16_t fake_fft[] = {
      {0, 0}, {-10, 9}, {-20, 0}, {-9, -10}, {0, 25}, {-119, 119},
      {-887, 0}, {3000, 3000}, {0, -6401}, {-3000, 3000}, {886, 0}, {118, 119},
      {0, 25}, {9, -10}, {19, 0}, {9, 9}, {0, 0}};
  // The conversion runs in place: the energy output aliases the FFT buffer.
  int32_t* energy = reinterpret_cast<int32_t*>(fake_fft);
  FilterbankConvertFftComplexToEnergy(&state, fake_fft, energy);
  int i;
  for (i = state.start_index; i < state.end_index; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(energy[i], kEnergy[i]);
  }
  // No FilterbankFreeStateContents() here: FilterbankPopulateState() was
  // never called, so nothing was allocated.
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckAccumulateChannels) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Accumulating the reference energies should leave the expected weighted
  // sums in the per-channel work buffer (num_channels + 1 entries).
  FilterbankAccumulateChannels(&state, kEnergy);
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels + 1,
                          sizeof(kWork) / sizeof(kWork[0]));
  int i;
  for (i = 0; i <= state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.work[i], kWork[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(FilterbankTest_CheckSqrt) {
  FilterbankTestConfig config;
  struct FilterbankState state;
  TF_LITE_MICRO_EXPECT(FilterbankPopulateState(&config.config_, &state,
                                               kSampleRate, kSpectrumSize));
  // Seed the work buffer with known accumulator values rather than running
  // the accumulate step again.
  std::memcpy(state.work, kWork, sizeof(kWork));
  uint32_t* scaled_filterbank = FilterbankSqrt(&state, kScaleShift);
  // Expected square-rooted output, one value per channel (num_channels
  // entries - one fewer than the work buffer).
  const uint32_t expected[] = {247311, 508620};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(scaled_filterbank[i], expected[i]);
  }
  FilterbankFreeStateContents(&state);
}
TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank_test.cc | C++ | apache-2.0 | 7,954 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
#include <assert.h>
#include <math.h>
#include <stdio.h>
#define kFilterbankIndexAlignment 4
#define kFilterbankChannelBlockSize 4
// Default filterbank settings: 32 mel channels spanning 125 Hz - 7.5 kHz,
// with an output scale shift of 7 (the shift field is currently unused per
// the header).
void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config) {
  config->output_scale_shift = 7;
  config->num_channels = 32;
  config->upper_band_limit = 7500.0f;
  config->lower_band_limit = 125.0f;
}
static float FreqToMel(float freq) { return 1127.0 * log1p(freq / 700.0); }
// Fills center_frequencies[0 .. num_channels-1] with the mel-scale positions
// of the channel upper edges: num_channels evenly spaced steps covering
// (lower_frequency_limit, upper_frequency_limit].
static void CalculateCenterFrequencies(const int num_channels,
                                       const float lower_frequency_limit,
                                       const float upper_frequency_limit,
                                       float* center_frequencies) {
  // The limits must describe a non-empty, non-negative frequency range.
  assert(lower_frequency_limit >= 0.0f);
  assert(upper_frequency_limit > lower_frequency_limit);
  const float mel_low = FreqToMel(lower_frequency_limit);
  const float mel_hi = FreqToMel(upper_frequency_limit);
  const float mel_span = mel_hi - mel_low;
  const float mel_spacing = mel_span / ((float)num_channels);
  // Channel k's edge sits (k + 1) steps above the lower limit.
  int chan;
  for (chan = 0; chan < num_channels; ++chan) {
    center_frequencies[chan] = mel_low + (mel_spacing * (chan + 1));
  }
}
// Converts a weight to fixed point with kFilterbankBits fractional bits,
// rounding to nearest via floor(x + 0.5). The complementary "unweight"
// (1 - weight) is quantized the same way.
// NOTE(review): assumes float_weight is in [0, 1] so both results fit in
// int16_t - confirm at the call site in FilterbankPopulateState.
static void QuantizeFilterbankWeights(const float float_weight, int16_t* weight,
                                      int16_t* unweight) {
  *weight = floor(float_weight * (1 << kFilterbankBits) + 0.5);
  *unweight = floor((1.0 - float_weight) * (1 << kFilterbankBits) + 0.5);
}
// Allocates and fills every derived table in |state| from |config|: the
// per-channel spectrum start indices, block-aligned channel widths, and the
// quantized weight/unweight arrays used for triangular-filter accumulation.
// Returns 1 on success, 0 on failure (allocation failure, or a computed end
// index beyond |spectrum_size|). On failure, any buffers already attached to
// |state| are left for FilterbankFreeStateContents to release (free(NULL) is
// a no-op for members that never got allocated).
int FilterbankPopulateState(const struct FilterbankConfig* config,
                            struct FilterbankState* state, int sample_rate,
                            int spectrum_size) {
  state->num_channels = config->num_channels;
  const int num_channels_plus_1 = config->num_channels + 1;

  // How should we align things to index counts given the byte alignment?
  const int index_alignment =
      (kFilterbankIndexAlignment < sizeof(int16_t)
           ? 1
           : kFilterbankIndexAlignment / sizeof(int16_t));

  state->channel_frequency_starts =
      malloc(num_channels_plus_1 * sizeof(*state->channel_frequency_starts));
  state->channel_weight_starts =
      malloc(num_channels_plus_1 * sizeof(*state->channel_weight_starts));
  state->channel_widths =
      malloc(num_channels_plus_1 * sizeof(*state->channel_widths));
  state->work = malloc(num_channels_plus_1 * sizeof(*state->work));

  // Scratch buffers that only live for the duration of this call.
  float* center_mel_freqs =
      malloc(num_channels_plus_1 * sizeof(*center_mel_freqs));
  int16_t* actual_channel_starts =
      malloc(num_channels_plus_1 * sizeof(*actual_channel_starts));
  int16_t* actual_channel_widths =
      malloc(num_channels_plus_1 * sizeof(*actual_channel_widths));

  if (state->channel_frequency_starts == NULL ||
      state->channel_weight_starts == NULL || state->channel_widths == NULL ||
      // Fix: state->work was the one allocation whose result was never
      // checked (CERT MEM32-C).
      state->work == NULL ||
      center_mel_freqs == NULL || actual_channel_starts == NULL ||
      actual_channel_widths == NULL) {
    free(center_mel_freqs);
    free(actual_channel_starts);
    free(actual_channel_widths);
    fprintf(stderr, "Failed to allocate channel buffers\n");
    return 0;
  }

  CalculateCenterFrequencies(num_channels_plus_1, config->lower_band_limit,
                             config->upper_band_limit, center_mel_freqs);

  // Always exclude DC.
  const float hz_per_sbin = 0.5 * sample_rate / ((float)spectrum_size - 1);
  state->start_index = 1.5 + config->lower_band_limit / hz_per_sbin;
  state->end_index = 0;  // Initialized to zero here, but actually set below.

  // For each channel, we need to figure out what frequencies belong to it, and
  // how much padding we need to add so that we can efficiently multiply the
  // weights and unweights for accumulation. To simplify the multiplication
  // logic, all channels will have some multiplication to do (even if there are
  // no frequencies that accumulate to that channel) - they will be directed to
  // a set of zero weights.
  int chan_freq_index_start = state->start_index;
  int weight_index_start = 0;
  int needs_zeros = 0;

  int chan;
  for (chan = 0; chan < num_channels_plus_1; ++chan) {
    // Keep jumping frequencies until we overshoot the bound on this channel.
    int freq_index = chan_freq_index_start;
    while (FreqToMel((freq_index)*hz_per_sbin) <= center_mel_freqs[chan]) {
      ++freq_index;
    }

    const int width = freq_index - chan_freq_index_start;
    actual_channel_starts[chan] = chan_freq_index_start;
    actual_channel_widths[chan] = width;

    if (width == 0) {
      // This channel doesn't actually get anything from the frequencies, it's
      // always zero. We need then to insert some 'zero' weights into the
      // output, and just redirect this channel to do a single multiplication at
      // this point. For simplicity, the zeros are placed at the beginning of
      // the weights arrays, so we have to go and update all the other
      // weight_starts to reflect this shift (but only once).
      state->channel_frequency_starts[chan] = 0;
      state->channel_weight_starts[chan] = 0;
      state->channel_widths[chan] = kFilterbankChannelBlockSize;
      if (!needs_zeros) {
        needs_zeros = 1;
        int j;
        for (j = 0; j < chan; ++j) {
          state->channel_weight_starts[j] += kFilterbankChannelBlockSize;
        }
        weight_index_start += kFilterbankChannelBlockSize;
      }
    } else {
      // How far back do we need to go to ensure that we have the proper
      // alignment?
      const int aligned_start =
          (chan_freq_index_start / index_alignment) * index_alignment;
      const int aligned_width = (chan_freq_index_start - aligned_start + width);
      const int padded_width =
          (((aligned_width - 1) / kFilterbankChannelBlockSize) + 1) *
          kFilterbankChannelBlockSize;

      state->channel_frequency_starts[chan] = aligned_start;
      state->channel_weight_starts[chan] = weight_index_start;
      state->channel_widths[chan] = padded_width;
      weight_index_start += padded_width;
    }
    chan_freq_index_start = freq_index;
  }

  // Allocate the two arrays to store the weights - weight_index_start contains
  // the index of what would be the next set of weights that we would need to
  // add, so that's how many weights we need to allocate. calloc zero-fills,
  // which is what the padding entries rely on.
  state->weights = calloc(weight_index_start, sizeof(*state->weights));
  state->unweights = calloc(weight_index_start, sizeof(*state->unweights));

  // If the alloc failed, we also need to nuke the arrays.
  if (state->weights == NULL || state->unweights == NULL) {
    free(center_mel_freqs);
    free(actual_channel_starts);
    free(actual_channel_widths);
    fprintf(stderr, "Failed to allocate weights or unweights\n");
    return 0;
  }

  // Next pass, compute all the weights. Since everything has been memset to
  // zero, we only need to fill in the weights that correspond to some frequency
  // for a channel.
  const float mel_low = FreqToMel(config->lower_band_limit);
  for (chan = 0; chan < num_channels_plus_1; ++chan) {
    int frequency = actual_channel_starts[chan];
    const int num_frequencies = actual_channel_widths[chan];
    const int frequency_offset =
        frequency - state->channel_frequency_starts[chan];
    const int weight_start = state->channel_weight_starts[chan];
    const float denom_val = (chan == 0) ? mel_low : center_mel_freqs[chan - 1];

    int j;
    for (j = 0; j < num_frequencies; ++j, ++frequency) {
      // Linear interpolation on the mel scale between this channel's edge and
      // the previous one.
      const float weight =
          (center_mel_freqs[chan] - FreqToMel(frequency * hz_per_sbin)) /
          (center_mel_freqs[chan] - denom_val);

      // Make the float into an integer for the weights (and unweights).
      const int weight_index = weight_start + frequency_offset + j;
      QuantizeFilterbankWeights(weight, state->weights + weight_index,
                                state->unweights + weight_index);
    }
    if (frequency > state->end_index) {
      state->end_index = frequency;
    }
  }

  free(center_mel_freqs);
  free(actual_channel_starts);
  free(actual_channel_widths);
  if (state->end_index >= spectrum_size) {
    fprintf(stderr, "Filterbank end_index is above spectrum size.\n");
    return 0;
  }
  return 1;
}
// Releases every buffer allocated by FilterbankPopulateState. Safe on a
// partially-populated state (free(NULL) is a no-op); the state struct itself
// is owned by the caller and is not freed here.
void FilterbankFreeStateContents(struct FilterbankState* state) {
  free(state->work);
  free(state->unweights);
  free(state->weights);
  free(state->channel_widths);
  free(state->channel_weight_starts);
  free(state->channel_frequency_starts);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.c | C | apache-2.0 | 9,117 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
#ifdef __cplusplus
extern "C" {
#endif
struct FilterbankConfig {
// number of frequency channel buckets for filterbank
int num_channels;
// maximum frequency to include
float upper_band_limit;
// minimum frequency to include
float lower_band_limit;
// unused
int output_scale_shift;
};
// Fills the FilterbankConfig with "sane" defaults.
void FilterbankFillConfigWithDefaults(struct FilterbankConfig* config);
// Allocates any buffers.
int FilterbankPopulateState(const struct FilterbankConfig* config,
struct FilterbankState* state, int sample_rate,
int spectrum_size);
// Frees any allocated buffers.
void FilterbankFreeStateContents(struct FilterbankState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FILTERBANK_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h | C | apache-2.0 | 1,754 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
// Runs the feature pipeline (window -> FFT -> filterbank -> noise reduction ->
// optional PCAN gain control -> log scale) over the input. Consumes up to
// num_samples samples (count written to *num_samples_read) and returns the
// generated feature vector, or size 0 / NULL values if the window is not yet
// full. The returned values pointer aliases internal state and is invalidated
// by the next call (see frontend.h).
struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
                                             const int16_t* samples,
                                             size_t num_samples,
                                             size_t* num_samples_read) {
  struct FrontendOutput output;
  output.values = NULL;
  output.size = 0;
  // Try to apply the window - if it fails, return and wait for more data.
  if (!WindowProcessSamples(&state->window, samples, num_samples,
                            num_samples_read)) {
    return output;
  }
  // Apply the FFT to the window's output (and scale it so that the fixed point
  // FFT can have as much resolution as possible).
  int input_shift =
      15 - MostSignificantBit32(state->window.max_abs_output_value);
  FftCompute(&state->fft, state->window.output, input_shift);
  // We can re-use the fft's output buffer to hold the energy.
  int32_t* energy = (int32_t*)state->fft.output;
  FilterbankConvertFftComplexToEnergy(&state->filterbank, state->fft.output,
                                      energy);
  FilterbankAccumulateChannels(&state->filterbank, energy);
  // input_shift is passed along, presumably so FilterbankSqrt can compensate
  // for the pre-FFT scaling - confirm in filterbank.c.
  uint32_t* scaled_filterbank = FilterbankSqrt(&state->filterbank, input_shift);
  // Apply noise reduction.
  NoiseReductionApply(&state->noise_reduction, scaled_filterbank);
  if (state->pcan_gain_control.enable_pcan) {
    PcanGainControlApply(&state->pcan_gain_control, scaled_filterbank);
  }
  // Apply the log and scale.
  int correction_bits =
      MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
  uint16_t* logged_filterbank =
      LogScaleApply(&state->log_scale, scaled_filterbank,
                    state->filterbank.num_channels, correction_bits);
  output.size = state->filterbank.num_channels;
  output.values = logged_filterbank;
  return output;
}
// Restores the stateful pipeline stages so a new audio stream can be
// processed from scratch.
// NOTE(review): pcan_gain_control and log_scale are not reset here -
// presumably they hold no mutable per-stream state; confirm before relying
// on this.
void FrontendReset(struct FrontendState* state) {
  WindowReset(&state->window);
  FftReset(&state->fft);
  FilterbankReset(&state->filterbank);
  NoiseReductionReset(&state->noise_reduction);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend.c | C | apache-2.0 | 2,851 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
#include <stdint.h>
#include <stdlib.h>
#include "tensorflow/lite/experimental/microfrontend/lib/fft.h"
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
#ifdef __cplusplus
extern "C" {
#endif
struct FrontendState {
struct WindowState window;
struct FftState fft;
struct FilterbankState filterbank;
struct NoiseReductionState noise_reduction;
struct PcanGainControlState pcan_gain_control;
struct LogScaleState log_scale;
};
struct FrontendOutput {
const uint16_t* values;
size_t size;
};
// Main entry point to processing frontend samples. Updates num_samples_read to
// contain the number of samples that have been consumed from the input array.
// Returns a struct containing the generated output. If not enough samples were
// added to generate a feature vector, the returned size will be 0 and the
// values pointer will be NULL. Note that the output pointer will be invalidated
// as soon as FrontendProcessSamples is called again, so copy the contents
// elsewhere if you need to use them later.
struct FrontendOutput FrontendProcessSamples(struct FrontendState* state,
const int16_t* samples,
size_t num_samples,
size_t* num_samples_read);
void FrontendReset(struct FrontendState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend.h | C | apache-2.0 | 2,585 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_io.h"
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/fft_io.h"
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_io.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h"
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window_io.h"
// Generates a header/source file pair that recreates |state| as statically
// allocated ("memory-mapped") data: the header declares
// GetFrontendStateMemmap(), and the source defines it together with every
// backing array. Returns 1 on success, 0 if either file cannot be opened.
// NOTE(review): fprintf/fclose results are not checked, so a write failure
// (e.g. full disk) would go unnoticed.
int WriteFrontendStateMemmap(const char* header, const char* source,
                             const struct FrontendState* state) {
  // Write a header that just has our init function.
  FILE* fp = fopen(header, "w");
  if (!fp) {
    fprintf(stderr, "Failed to open header '%s' for write\n", header);
    return 0;
  }
  fprintf(fp, "#ifndef FRONTEND_STATE_MEMMAP_H_\n");
  fprintf(fp, "#define FRONTEND_STATE_MEMMAP_H_\n");
  fprintf(fp, "\n");
  fprintf(fp, "#include \"frontend.h\"\n");
  fprintf(fp, "\n");
  fprintf(fp, "struct FrontendState* GetFrontendStateMemmap();\n");
  fprintf(fp, "\n");
  fprintf(fp, "#endif  // FRONTEND_STATE_MEMMAP_H_\n");
  fclose(fp);
  // Write out the source file that actually has everything in it.
  fp = fopen(source, "w");
  if (!fp) {
    fprintf(stderr, "Failed to open source '%s' for write\n", source);
    return 0;
  }
  fprintf(fp, "#include \"%s\"\n", header);
  fprintf(fp, "\n");
  // Each stage first emits its static arrays, then the accessor wires the
  // singleton state struct up to them.
  WindowWriteMemmapPreamble(fp, &state->window);
  FftWriteMemmapPreamble(fp, &state->fft);
  FilterbankWriteMemmapPreamble(fp, &state->filterbank);
  NoiseReductionWriteMemmapPreamble(fp, &state->noise_reduction);
  fprintf(fp, "static struct FrontendState state;\n");
  fprintf(fp, "struct FrontendState* GetFrontendStateMemmap() {\n");
  WindowWriteMemmap(fp, &state->window, "  (&state.window)");
  FftWriteMemmap(fp, &state->fft, "  (&state.fft)");
  FilterbankWriteMemmap(fp, &state->filterbank, "  (&state.filterbank)");
  NoiseReductionWriteMemmap(fp, &state->noise_reduction,
                            "  (&state.noise_reduction)");
  LogScaleWriteMemmap(fp, &state->log_scale, "  (&state.log_scale)");
  fprintf(fp, "  FftInit(&state.fft);\n");
  fprintf(fp, "  FrontendReset(&state);\n");
  fprintf(fp, "  return &state;\n");
  fprintf(fp, "}\n");
  fclose(fp);
  return 1;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_io.c | C | apache-2.0 | 2,951 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#ifdef __cplusplus
extern "C" {
#endif
int WriteFrontendStateMemmap(const char* header, const char* source,
const struct FrontendState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_io.h | C | apache-2.0 | 1,172 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
// Reads 16-bit PCM samples from the file named by argv[1], runs them through
// the micro-frontend feature generator at a fixed 16 kHz sample rate, and
// prints one line of feature values per generated frame.
// Returns 0 on success, 1 on any usage, I/O, or allocation error.
int main(int argc, char** argv) {
  // Fix: argv[1] was previously dereferenced without checking argc.
  if (argc != 2) {
    fprintf(stderr, "%s requires exactly one parameter - the audio file\n",
            argv[0]);
    return 1;
  }
  struct FrontendConfig frontend_config;
  FrontendFillConfigWithDefaults(&frontend_config);
  const char* filename = argv[1];
  int sample_rate = 16000;
  struct FrontendState frontend_state;
  if (!FrontendPopulateState(&frontend_config, &frontend_state, sample_rate)) {
    fprintf(stderr, "Failed to populate frontend state\n");
    FrontendFreeStateContents(&frontend_state);
    return 1;
  }
  // "rb": open in binary mode so 16-bit samples are not mangled on platforms
  // that translate line endings in text mode.
  FILE* fp = fopen(filename, "rb");
  if (fp == NULL) {
    fprintf(stderr, "Failed to open %s for read\n", filename);
    FrontendFreeStateContents(&frontend_state);
    return 1;
  }
  fseek(fp, 0L, SEEK_END);
  size_t audio_file_size = ftell(fp) / sizeof(int16_t);
  fseek(fp, 0L, SEEK_SET);
  int16_t* audio_data = malloc(audio_file_size * sizeof(int16_t));
  // Fix: the malloc result was previously passed straight to fread.
  if (audio_data == NULL) {
    fprintf(stderr, "Failed to allocate audio buffer\n");
    fclose(fp);
    FrontendFreeStateContents(&frontend_state);
    return 1;
  }
  // audio_data is advanced while streaming; keep the original for free().
  int16_t* original_audio_data = audio_data;
  if (audio_file_size !=
      fread(audio_data, sizeof(int16_t), audio_file_size, fp)) {
    fprintf(stderr, "Failed to read in all audio data\n");
    // Fix: this path previously leaked the buffer, the FILE, and the state.
    free(original_audio_data);
    fclose(fp);
    FrontendFreeStateContents(&frontend_state);
    return 1;
  }
  while (audio_file_size > 0) {
    size_t num_samples_read;
    struct FrontendOutput output = FrontendProcessSamples(
        &frontend_state, audio_data, audio_file_size, &num_samples_read);
    audio_data += num_samples_read;
    audio_file_size -= num_samples_read;
    if (output.values != NULL) {
      size_t i;
      for (i = 0; i < output.size; ++i) {
        printf("%d ", output.values[i]);
      }
      printf("\n");
    }
  }
  FrontendFreeStateContents(&frontend_state);
  free(original_audio_data);
  fclose(fp);
  return 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_main.c | C | apache-2.0 | 2,348 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_io.h"
int main(int argc, char** argv) {
if (argc != 3) {
fprintf(stderr,
"%s requires exactly two parameters - the names of the header and "
"source files to save\n",
argv[0]);
return 1;
}
struct FrontendConfig frontend_config;
FrontendFillConfigWithDefaults(&frontend_config);
int sample_rate = 16000;
struct FrontendState frontend_state;
if (!FrontendPopulateState(&frontend_config, &frontend_state, sample_rate)) {
fprintf(stderr, "Failed to populate frontend state\n");
FrontendFreeStateContents(&frontend_state);
return 1;
}
if (!WriteFrontendStateMemmap(argv[1], argv[2], &frontend_state)) {
fprintf(stderr, "Failed to write memmap\n");
FrontendFreeStateContents(&frontend_state);
return 1;
}
FrontendFreeStateContents(&frontend_state);
return 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_memmap_generator.c | C | apache-2.0 | 1,749 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdio.h>
#include <stdlib.h>

#include "memmap.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
// Command-line driver for the memmapped microfrontend: reads a raw binary
// 16-bit PCM file named in argv[1], runs it through the frontend, and prints
// each emitted feature frame as a line of integers on stdout.
// Returns 0 on success, 1 on usage, I/O, or allocation failure.
int main(int argc, char** argv) {
  // argv[1] was previously dereferenced without any check; reject bad usage.
  if (argc != 2) {
    fprintf(stderr,
            "%s requires exactly one parameter - the name of a raw 16-bit "
            "audio file\n",
            argv[0]);
    return 1;
  }
  struct FrontendState* frontend_state = GetFrontendStateMemmap();
  char* filename = argv[1];
  // "rb": the file holds binary int16 samples, so avoid text-mode newline
  // translation on platforms that distinguish text from binary streams.
  FILE* fp = fopen(filename, "rb");
  if (fp == NULL) {
    fprintf(stderr, "Failed to open %s for read\n", filename);
    return 1;
  }
  // Derive the sample count from the file length.
  fseek(fp, 0L, SEEK_END);
  long file_length = ftell(fp);
  if (file_length < 0) {
    fprintf(stderr, "Failed to determine size of %s\n", filename);
    fclose(fp);
    return 1;
  }
  size_t audio_file_size = (size_t)file_length / sizeof(int16_t);
  fseek(fp, 0L, SEEK_SET);
  int16_t* audio_data = malloc(audio_file_size * sizeof(int16_t));
  // malloc(0) may legitimately return NULL for an empty file, so only treat
  // NULL as failure when something was actually requested.
  if (audio_data == NULL && audio_file_size > 0) {
    fprintf(stderr, "Failed to allocate audio buffer\n");
    fclose(fp);
    return 1;
  }
  // audio_data is advanced while processing; keep the original for free().
  int16_t* original_audio_data = audio_data;
  if (audio_file_size !=
      fread(audio_data, sizeof(int16_t), audio_file_size, fp)) {
    fprintf(stderr, "Failed to read in all audio data\n");
    free(original_audio_data);  // was leaked on this error path before
    fclose(fp);
    return 1;
  }
  while (audio_file_size > 0) {
    size_t num_samples_read;
    struct FrontendOutput output = FrontendProcessSamples(
        frontend_state, audio_data, audio_file_size, &num_samples_read);
    audio_data += num_samples_read;
    audio_file_size -= num_samples_read;
    if (output.values != NULL) {
      int i;
      for (i = 0; i < output.size; ++i) {
        printf("%d ", output.values[i]);
      }
      printf("\n");
    }
  }
  free(original_audio_data);
  fclose(fp);
  return 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_memmap_main.c | C | apache-2.0 | 1,947 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {
// A 1 kHz sample rate makes the 25 ms window and 10 ms step configured below
// equal exactly kWindowSamples and kStepSamples samples.
const int kSampleRate = 1000;
const int kWindowSamples = 25;
const int kStepSamples = 10;
// Full-scale alternating input pattern (0, INT16_MAX, 0, INT16_MIN, ...),
// 36 samples long, used by all tests in this file.
const int16_t kFakeAudioData[] = {
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};
// Test end-to-end frontend behaviors.
// Builds the small two-channel frontend configuration shared by all tests in
// this file. (The original constructor assigned
// noise_reduction.smoothing_bits twice with the same value; the duplicate
// assignment has been removed.)
class FrontendTestConfig {
 public:
  FrontendTestConfig() {
    config_.window.size_ms = 25;
    config_.window.step_size_ms = 10;
    config_.filterbank.num_channels = 2;
    config_.filterbank.lower_band_limit = 8.0;
    config_.filterbank.upper_band_limit = 450.0;
    config_.noise_reduction.smoothing_bits = 10;
    config_.noise_reduction.even_smoothing = 0.025;
    config_.noise_reduction.odd_smoothing = 0.06;
    config_.noise_reduction.min_signal_remaining = 0.05;
    config_.pcan_gain_control.enable_pcan = true;
    config_.pcan_gain_control.strength = 0.95;
    config_.pcan_gain_control.offset = 80.0;
    config_.pcan_gain_control.gain_bits = 21;
    config_.log_scale.enable_log = true;
    config_.log_scale.scale_shift = 6;
  }

  struct FrontendConfig config_;
};
} // namespace
TF_LITE_MICRO_TESTS_BEGIN

// Runs the whole fake-audio buffer through a freshly populated frontend and
// checks the first emitted feature frame against golden values.
TF_LITE_MICRO_TEST(FrontendTest_CheckOutputValues) {
  FrontendTestConfig config;
  struct FrontendState state;
  TF_LITE_MICRO_EXPECT(
      FrontendPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  struct FrontendOutput output = FrontendProcessSamples(
      &state, kFakeAudioData,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]), &num_samples_read);

  const uint16_t expected[] = {479, 425};
  TF_LITE_MICRO_EXPECT_EQ(output.size, sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < output.size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(output.values[i], expected[i]);
  }
  FrontendFreeStateContents(&state);
}

// Feeds a first full pass, then an overlapping second chunk starting one
// window later, and checks the next frame's golden values.
// NOTE(review): |expected| is int16_t here but uint16_t in the test above —
// presumably both should match FrontendOutput's value type; confirm before
// changing.
TF_LITE_MICRO_TEST(FrontendTest_CheckConsecutiveWindow) {
  FrontendTestConfig config;
  struct FrontendState state;
  TF_LITE_MICRO_EXPECT(
      FrontendPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  FrontendProcessSamples(&state, kFakeAudioData,
                         sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]),
                         &num_samples_read);
  struct FrontendOutput output = FrontendProcessSamples(
      &state, kFakeAudioData + kWindowSamples,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]) - kWindowSamples,
      &num_samples_read);
  const int16_t expected[] = {436, 378};
  TF_LITE_MICRO_EXPECT_EQ(output.size, sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < output.size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(output.values[i], expected[i]);
  }
  FrontendFreeStateContents(&state);
}

// After consuming most of the buffer, the remaining samples are fewer than a
// window step, so the frontend must emit an empty output (size 0, NULL data).
TF_LITE_MICRO_TEST(FrontendTest_CheckNotEnoughSamples) {
  FrontendTestConfig config;
  struct FrontendState state;
  TF_LITE_MICRO_EXPECT(
      FrontendPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  FrontendProcessSamples(&state, kFakeAudioData,
                         sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]),
                         &num_samples_read);
  FrontendProcessSamples(
      &state, kFakeAudioData + kWindowSamples,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]) - kWindowSamples,
      &num_samples_read);
  struct FrontendOutput output = FrontendProcessSamples(
      &state, kFakeAudioData + kWindowSamples + kStepSamples,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]) - kWindowSamples -
          kStepSamples,
      &num_samples_read);
  TF_LITE_MICRO_EXPECT_EQ(output.size, 0);
  TF_LITE_MICRO_EXPECT(output.values == nullptr);
  FrontendFreeStateContents(&state);
}

TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_test.cc | C++ | apache-2.0 | 4,672 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include <stdio.h>
#include <string.h>
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
// Fills every stage's sub-config (window, filterbank, noise reduction, PCAN
// gain control, log scale) with that module's default values.
void FrontendFillConfigWithDefaults(struct FrontendConfig* config) {
  WindowFillConfigWithDefaults(&config->window);
  FilterbankFillConfigWithDefaults(&config->filterbank);
  NoiseReductionFillConfigWithDefaults(&config->noise_reduction);
  PcanGainControlFillConfigWithDefaults(&config->pcan_gain_control);
  LogScaleFillConfigWithDefaults(&config->log_scale);
}
// Initializes every stage of the frontend pipeline, in dependency order.
// |state| is zeroed first, so FrontendFreeStateContents is safe to call even
// after a partial failure (callers rely on this; see the memmap generator).
// Returns 1 on success, 0 (with a message on stderr) on failure.
int FrontendPopulateState(const struct FrontendConfig* config,
                          struct FrontendState* state, int sample_rate) {
  memset(state, 0, sizeof(*state));

  if (!WindowPopulateState(&config->window, &state->window, sample_rate)) {
    fprintf(stderr, "Failed to populate window state\n");
    return 0;
  }
  // The FFT state is sized from the window, so this must follow window setup.
  if (!FftPopulateState(&state->fft, state->window.size)) {
    fprintf(stderr, "Failed to populate fft state\n");
    return 0;
  }
  FftInit(&state->fft);
  // The filterbank consumes fft_size / 2 + 1 spectral bins.
  if (!FilterbankPopulateState(&config->filterbank, &state->filterbank,
                               sample_rate, state->fft.fft_size / 2 + 1)) {
    fprintf(stderr, "Failed to populate filterbank state\n");
    return 0;
  }
  // Noise reduction needs one estimate slot per filterbank channel.
  if (!NoiseReductionPopulateState(&config->noise_reduction,
                                   &state->noise_reduction,
                                   state->filterbank.num_channels)) {
    fprintf(stderr, "Failed to populate noise reduction state\n");
    return 0;
  }
  // NOTE(review): presumably compensates PCAN's input scale for the FFT and
  // filterbank bit growth (derived from fft_size and kFilterbankBits) —
  // confirm against the pcan_gain_control module.
  int input_correction_bits =
      MostSignificantBit32(state->fft.fft_size) - 1 - (kFilterbankBits / 2);
  // PCAN shares the noise-reduction estimate buffer rather than copying it.
  if (!PcanGainControlPopulateState(
          &config->pcan_gain_control, &state->pcan_gain_control,
          state->noise_reduction.estimate, state->filterbank.num_channels,
          state->noise_reduction.smoothing_bits, input_correction_bits)) {
    fprintf(stderr, "Failed to populate pcan gain control state\n");
    return 0;
  }
  if (!LogScalePopulateState(&config->log_scale, &state->log_scale)) {
    fprintf(stderr, "Failed to populate log scale state\n");
    return 0;
  }
  FrontendReset(state);
  // All good, return a true value.
  return 1;
}
// Releases the buffers owned by each sub-state. Safe to call on a state that
// FrontendPopulateState zero-initialized, even if population failed partway.
// Note the PCAN state is freed last; it shares the noise-reduction estimate
// buffer (see FrontendPopulateState).
void FrontendFreeStateContents(struct FrontendState* state) {
  WindowFreeStateContents(&state->window);
  FftFreeStateContents(&state->fft);
  FilterbankFreeStateContents(&state->filterbank);
  NoiseReductionFreeStateContents(&state->noise_reduction);
  PcanGainControlFreeStateContents(&state->pcan_gain_control);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.c | C | apache-2.0 | 3,200 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
#include "tensorflow/lite/experimental/microfrontend/lib/fft_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/filterbank_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
#ifdef __cplusplus
extern "C" {
#endif
// Aggregate configuration for every stage of the audio feature-extraction
// pipeline, in processing order.
struct FrontendConfig {
  struct WindowConfig window;
  struct FilterbankConfig filterbank;
  struct NoiseReductionConfig noise_reduction;
  struct PcanGainControlConfig pcan_gain_control;
  struct LogScaleConfig log_scale;
};

// Fills the frontendConfig with "sane" defaults.
void FrontendFillConfigWithDefaults(struct FrontendConfig* config);

// Allocates any buffers and initializes all pipeline stages. Returns 1 on
// success, 0 on failure; the state is zeroed first, so callers may (and do)
// call FrontendFreeStateContents after a failure to release partial buffers.
int FrontendPopulateState(const struct FrontendConfig* config,
                          struct FrontendState* state, int sample_rate);

// Frees any allocated buffers.
void FrontendFreeStateContents(struct FrontendState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_FRONTEND_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/frontend_util.h | C | apache-2.0 | 2,103 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
// Correction lookup table for the piecewise-linear log2 approximation in
// log_scale.c (see Log2FractionPart). kLogSegments + 1 entries plus one
// trailing zero of padding, as described in log_lut.h.
const uint16_t kLogLut[]
#ifndef _MSC_VER
    __attribute__((aligned(4)))  // keep the table 4-byte aligned
#endif  // _MSC_VER
    = {0, 224, 442, 654, 861, 1063, 1259, 1450, 1636, 1817, 1992, 2163,
       2329, 2490, 2646, 2797, 2944, 3087, 3224, 3358, 3487, 3611, 3732, 3848,
       3960, 4068, 4172, 4272, 4368, 4460, 4549, 4633, 4714, 4791, 4864, 4934,
       5001, 5063, 5123, 5178, 5231, 5280, 5326, 5368, 5408, 5444, 5477, 5507,
       5533, 5557, 5578, 5595, 5610, 5622, 5631, 5637, 5640, 5641, 5638, 5633,
       5626, 5615, 5602, 5586, 5568, 5547, 5524, 5498, 5470, 5439, 5406, 5370,
       5332, 5291, 5249, 5203, 5156, 5106, 5054, 5000, 4944, 4885, 4825, 4762,
       4697, 4630, 4561, 4490, 4416, 4341, 4264, 4184, 4103, 4020, 3935, 3848,
       3759, 3668, 3575, 3481, 3384, 3286, 3186, 3084, 2981, 2875, 2768, 2659,
       2549, 2437, 2323, 2207, 2090, 1971, 1851, 1729, 1605, 1480, 1353, 1224,
       1094, 963, 830, 695, 559, 421, 282, 142, 0, 0};
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_lut.c | C | apache-2.0 | 1,686 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
#include <stdint.h>
#ifdef __cplusplus
extern "C" {
#endif
// Number of segments in the log lookup table. The table will be kLogSegments+1
// in length (with some padding).
#define kLogSegments 128
#define kLogSegmentsLog2 7  // log2(kLogSegments)

// Scale used by lookup table (the fixed-point "1.0").
#define kLogScale 65536
#define kLogScaleLog2 16  // log2(kLogScale)
// Rounded ln(2) * 2^16; converts a fixed-point log2 into a natural log.
#define kLogCoeff 45426

// Defined in log_lut.c.
extern const uint16_t kLogLut[];
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_LUT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_lut.h | C | apache-2.0 | 1,282 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_lut.h"
#define kuint16max 0x0000FFFF
// The following functions implement integer logarithms of various sizes. The
// approximation is calculated according to method described in
// www.inti.gob.ar/electronicaeinformatica/instrumentacion/utic/
// publicaciones/SPL2007/Log10-spl07.pdf
// It first calculates log2 of the input and then converts it to natural
// logarithm.
// Returns the fractional part of log2(x) in kLogScale fixed point.
// |log2x| must equal floor(log2(x)), so x >= 1 and x - 2^log2x >= 0.
// Uses piecewise-linear interpolation over kLogSegments segments of kLogLut,
// per the paper cited above: the result is the linear mantissa fraction plus
// a LUT-interpolated correction toward the true log2 curve.
static uint32_t Log2FractionPart(const uint32_t x, const uint32_t log2x) {
  // Part 1: isolate the mantissa bits below the leading one and rescale them
  // to the kLogScaleLog2-bit fixed-point scale.
  int32_t frac = x - (1LL << log2x);
  if (log2x < kLogScaleLog2) {
    frac <<= kLogScaleLog2 - log2x;
  } else {
    frac >>= log2x - kLogScaleLog2;
  }
  // Part 2: linearly interpolate the correction between the two LUT entries
  // that bracket this segment.
  const uint32_t base_seg = frac >> (kLogScaleLog2 - kLogSegmentsLog2);
  const uint32_t seg_unit =
      (((uint32_t)1) << kLogScaleLog2) >> kLogSegmentsLog2;
  const int32_t c0 = kLogLut[base_seg];
  const int32_t c1 = kLogLut[base_seg + 1];
  const int32_t seg_base = seg_unit * base_seg;
  const int32_t rel_pos = ((c1 - c0) * (frac - seg_base)) >> kLogScaleLog2;
  return frac + c0 + rel_pos;
}
// Returns round(ln(x) * 2^scale_shift) using integer-only arithmetic.
// Requires x >= 1: MostSignificantBit32(0) - 1 would underflow, and the only
// caller (LogScaleApply) guards with value > 1. log2(x) is computed first,
// then converted to a natural log via kLogCoeff (45426 ~= ln(2) * 2^16).
static uint32_t Log(const uint32_t x, const uint32_t scale_shift) {
  const uint32_t integer = MostSignificantBit32(x) - 1;  // floor(log2(x))
  const uint32_t fraction = Log2FractionPart(x, integer);
  const uint32_t log2 = (integer << kLogScaleLog2) + fraction;
  const uint32_t round = kLogScale / 2;  // for round-to-nearest on the shifts
  const uint32_t loge = (((uint64_t)kLogCoeff) * log2 + round) >> kLogScaleLog2;
  // Finally scale to our output scale
  const uint32_t loge_scaled = ((loge << scale_shift) + round) >> kLogScaleLog2;
  return loge_scaled;
}
// Converts each 32-bit value in |signal| to a 16-bit (optionally log-scaled)
// value, writing the results in place over the input buffer, and returns a
// uint16_t pointer to that buffer. |correction_bits| pre-shifts each value
// (left if positive, right if negative) before the log is taken.
uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
                        int signal_size, int correction_bits) {
  // The narrow write cursor trails the wide read cursor through the same
  // buffer, so every input word is read before it is overwritten.
  uint16_t* const result = (uint16_t*)signal;
  uint16_t* out = result;
  const uint32_t* in = signal;
  int remaining;
  for (remaining = signal_size; remaining > 0; --remaining) {
    uint32_t v = *in++;
    if (state->enable_log) {
      // Undo the scaling applied by earlier pipeline stages.
      v = (correction_bits < 0) ? (v >> -correction_bits)
                                : (v << correction_bits);
      // Log(0) and Log(1) are not taken: both map to 0.
      v = (v > 1) ? Log(v, state->scale_shift) : 0;
    }
    // Saturate to 16 bits.
    *out++ = (v < kuint16max) ? (uint16_t)v : (uint16_t)kuint16max;
  }
  return result;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale.c | C | apache-2.0 | 3,093 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
#include <stdint.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
struct LogScaleState {
  int enable_log;   // non-zero: apply the log; zero: clamp-only pass-through
  int scale_shift;  // log output is scaled by 2^scale_shift
};

// Applies a fixed point logarithm to the signal and converts it to 16 bit. Note
// that the signal array will be modified: the returned uint16_t pointer
// aliases the storage of |signal| (results are written in place).
uint16_t* LogScaleApply(struct LogScaleState* state, uint32_t* signal,
                        int signal_size, int correction_bits);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale.h | C | apache-2.0 | 1,325 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h"
// Emits C source to |fp| that reconstructs |state| into the struct pointer
// named by |variable|, one field assignment per line, in declaration order.
void LogScaleWriteMemmap(FILE* fp, const struct LogScaleState* state,
                         const char* variable) {
  fprintf(fp,
          "%s->enable_log = %d;\n"
          "%s->scale_shift = %d;\n",
          variable, state->enable_log, variable, state->scale_shift);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.c | C | apache-2.0 | 1,006 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
#ifdef __cplusplus
extern "C" {
#endif
// Writes C source to |fp| that re-creates |state| through a struct pointer
// named |variable| (used by the frontend memmap generator).
void LogScaleWriteMemmap(FILE* fp, const struct LogScaleState* state,
                         const char* variable);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale_io.h | C | apache-2.0 | 1,180 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {

const int kScaleShift = 6;
// Negative correction: inputs are shifted right by one before the log.
const int kCorrectionBits = -1;

}  // namespace

TF_LITE_MICRO_TESTS_BEGIN

// With the log enabled, outputs must match the golden fixed-point values.
// NOTE(review): the loop index is int compared against a size_t bound —
// benign here, but a signed/unsigned mismatch warning candidate.
TF_LITE_MICRO_TEST(LogScaleTest_CheckOutputValues) {
  struct LogScaleState state;
  state.enable_log = true;
  state.scale_shift = kScaleShift;

  uint32_t fake_signal[] = {3578, 1533};
  uint16_t* output = LogScaleApply(&state, fake_signal,
                                   sizeof(fake_signal) / sizeof(fake_signal[0]),
                                   kCorrectionBits);

  const uint16_t expected[] = {479, 425};
  int i;
  for (i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
    TF_LITE_MICRO_EXPECT_EQ(output[i], expected[i]);
  }
}

// With the log disabled, values pass through but saturate at 16 bits
// (85964 clamps to 65535; 45998 fits and is unchanged).
TF_LITE_MICRO_TEST(LogScaleTest_CheckOutputValuesNoLog) {
  struct LogScaleState state;
  state.enable_log = false;
  state.scale_shift = kScaleShift;

  uint32_t fake_signal[] = {85964, 45998};
  uint16_t* output = LogScaleApply(&state, fake_signal,
                                   sizeof(fake_signal) / sizeof(fake_signal[0]),
                                   kCorrectionBits);

  const uint16_t expected[] = {65535, 45998};
  int i;
  for (i = 0; i < sizeof(expected) / sizeof(expected[0]); ++i) {
    TF_LITE_MICRO_EXPECT_EQ(output[i], expected[i]);
  }
}

TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale_test.cc | C++ | apache-2.0 | 2,131 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h"
void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config) {
config->enable_log = 1;
config->scale_shift = 6;
}
// Copies the configured settings into the runtime state. Nothing is
// allocated, so population cannot fail; always returns 1 (success).
int LogScalePopulateState(const struct LogScaleConfig* config,
                          struct LogScaleState* state) {
  state->scale_shift = config->scale_shift;
  state->enable_log = config->enable_log;
  return 1;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.c | C | apache-2.0 | 1,090 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
#include <stdint.h>
#include <stdlib.h>
#include "tensorflow/lite/experimental/microfrontend/lib/log_scale.h"
#ifdef __cplusplus
extern "C" {
#endif
struct LogScaleConfig {
  // set to false (0) to disable this module
  int enable_log;
  // scale results by 2^(scale_shift)
  int scale_shift;
};

// Populates the LogScaleConfig with "sane" default values
// (enable_log = 1, scale_shift = 6).
void LogScaleFillConfigWithDefaults(struct LogScaleConfig* config);

// Copies the config into the runtime state. Allocates nothing and always
// returns 1 (success).
int LogScalePopulateState(const struct LogScaleConfig* config,
                          struct LogScaleState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_LOG_SCALE_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/log_scale_util.h | C | apache-2.0 | 1,511 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
#include <string.h>
// Suppresses stationary noise per channel, in place on |signal|.
// Each channel keeps a low-pass-filtered noise estimate in state->estimate
// (stored scaled up by state->smoothing_bits for extra precision) and emits
// max(signal - estimate, floor), where the floor keeps min_signal_remaining
// (a Q-kNoiseReductionBits fraction) of the raw input.
void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal) {
  int i;
  for (i = 0; i < state->num_channels; ++i) {
    // Even- and odd-indexed channels use separate smoothing coefficients,
    // both Q-kNoiseReductionBits fixed-point fractions.
    const uint32_t smoothing =
        ((i & 1) == 0) ? state->even_smoothing : state->odd_smoothing;
    const uint32_t one_minus_smoothing = (1 << kNoiseReductionBits) - smoothing;

    // Update the estimate of the noise.
    // NOTE(review): signal[i] << smoothing_bits silently drops high bits if
    // the input ever reaches 2^(32 - smoothing_bits) — presumably bounded by
    // upstream stages; confirm against the filterbank output range.
    const uint32_t signal_scaled_up = signal[i] << state->smoothing_bits;
    uint32_t estimate =
        (((uint64_t)signal_scaled_up * smoothing) +
         ((uint64_t)state->estimate[i] * one_minus_smoothing)) >>
        kNoiseReductionBits;
    state->estimate[i] = estimate;

    // Make sure that we can't get a negative value for the signal - estimate.
    if (estimate > signal_scaled_up) {
      estimate = signal_scaled_up;
    }

    const uint32_t floor =
        ((uint64_t)signal[i] * state->min_signal_remaining) >>
        kNoiseReductionBits;
    const uint32_t subtracted =
        (signal_scaled_up - estimate) >> state->smoothing_bits;
    const uint32_t output = subtracted > floor ? subtracted : floor;
    signal[i] = output;
  }
}
// Forgets all per-channel noise estimates so the filter re-adapts from zero.
void NoiseReductionReset(struct NoiseReductionState* state) {
  int channel;
  for (channel = 0; channel < state->num_channels; ++channel) {
    state->estimate[channel] = 0;
  }
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.c | C | apache-2.0 | 2,025 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
#define kNoiseReductionBits 14
#include <stdint.h>
#include <stdlib.h>
#ifdef __cplusplus
extern "C" {
#endif
struct NoiseReductionState {
  int smoothing_bits;    // extra precision bits used to store the estimate
  uint16_t even_smoothing;  // Q(kNoiseReductionBits) coeff, even channels
  uint16_t odd_smoothing;   // Q(kNoiseReductionBits) coeff, odd channels
  uint16_t min_signal_remaining;  // Q(kNoiseReductionBits) output floor
  int num_channels;      // number of filterbank channels
  uint32_t* estimate;    // per-channel noise estimate, num_channels entries
};

// Removes stationary noise from each channel of the signal using a low pass
// filter. Operates in place on |signal| (one uint32_t per channel) and
// updates the running estimates in |state|.
void NoiseReductionApply(struct NoiseReductionState* state, uint32_t* signal);

// Zeroes the per-channel noise estimates so the filter re-adapts.
void NoiseReductionReset(struct NoiseReductionState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h | C | apache-2.0 | 1,465 |