|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#ifndef TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ |
|
|
#define TENSORFLOW_LITE_MICRO_TEST_HELPERS_H_ |
|
|
|
|
|
|
|
|
|
|
|
#include <cstddef>
#include <cstdint>
#include <initializer_list>
#include <limits>

#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h"

#include "edge-impulse-sdk/tensorflow/lite//kernels/internal/tensor_ctypes.h"
#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h"
#include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h"
#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h"
|
|
|
|
|
namespace tflite { |
|
|
namespace testing { |
|
|
|
|
|
// Number of int32_t header entries expected at the front of an
// offline-planner metadata buffer.
// NOTE(review): meaning inferred from the name and from
// GetModelWithOfflinePlanning()'s metadata_buffer parameter — confirm
// against the test_helpers implementation.
constexpr int kOfflinePlannerHeaderSize = 3;

// Describes one operator's tensor wiring when assembling mock models:
// the tensor indices the node reads and the tensor indices it writes.
struct NodeConnection_ {
  std::initializer_list<int32_t> input;
  std::initializer_list<int32_t> output;
};
// Modern alias instead of the C-style `typedef struct`; callers keep
// using `NodeConnection` exactly as before.
using NodeConnection = NodeConnection_;
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Mock operator with persistent per-instance state, used to exercise the
// framework's handling of stateful ops. Method bodies live in the
// corresponding test_helpers implementation file.
class SimpleStatefulOp {
  // Sentinel: the sorting scratch buffer has not been requested/assigned yet.
  static constexpr int kBufferNotAllocated = 0;

  // Index of the op's input tensor.
  static constexpr int kInputTensor = 0;

  // Output tensor indices. NOTE(review): names suggest output 0 is a median
  // value and output 1 an invocation counter — confirm in the .cc.
  static constexpr int kMedianTensor = 0;
  static constexpr int kInvokeCount = 1;

  // State created by Init() and consulted by Prepare()/Invoke().
  struct OpData {
    // Presumably incremented once per Invoke() call — verify in the .cc.
    int* invoke_count = nullptr;
    // Scratch-buffer handle used for sorting; stays kBufferNotAllocated
    // until the op requests a buffer.
    int sorting_buffer = kBufferNotAllocated;
  };

 public:
  // NOTE(review): lowercase `getRegistration` is inconsistent with
  // GetMutableRegistration but is part of the public interface — do not rename.
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);
};
|
|
|
|
|
// Mock custom operator used to verify registration and the
// Init/Free/Prepare/Invoke lifecycle plumbing. Method bodies live in the
// corresponding test_helpers implementation file.
class MockCustom {
 public:
  // NOTE(review): lowercase `getRegistration` is inconsistent with
  // GetMutableRegistration but is part of the public interface — do not rename.
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  // Presumably set when Free() runs, letting tests observe teardown —
  // confirm in the .cc.
  static bool freed_;
};
|
|
|
|
|
|
|
|
|
|
|
// Mock operator that (per its name) consumes multiple input tensors;
// used together with GetSimpleMultipleInputsModel(). Method bodies live
// in the corresponding test_helpers implementation file.
class MultipleInputs {
 public:
  // NOTE(review): lowercase `getRegistration` is inconsistent with
  // GetMutableRegistration but is part of the public interface — do not rename.
  static const TfLiteRegistration* getRegistration();
  static TfLiteRegistration* GetMutableRegistration();
  static void* Init(TfLiteContext* context, const char* buffer, size_t length);
  static void Free(TfLiteContext* context, void* buffer);
  static TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node);
  static TfLiteStatus Invoke(TfLiteContext* context, TfLiteNode* node);

  // Presumably set when Free() runs, letting tests observe teardown —
  // confirm in the .cc.
  static bool freed_;
};
|
|
|
|
|
|
|
|
// Returns an op resolver suitable for instantiating interpreters in tests.
AllOpsResolver GetOpResolver();

// Returns a small mock flatbuffer Model for allocator/interpreter tests.
// Exact topology is defined in the corresponding .cc file.
const Model* GetSimpleMockModel();

// Returns a larger mock Model exercising more tensors/operators than the
// simple one. Exact topology is defined in the corresponding .cc file.
const Model* GetComplexMockModel();

// Returns a mock Model whose graph contains a branch — presumably for
// memory-planner tests; confirm topology in the .cc file.
const Model* GetSimpleModelWithBranch();

// Returns a mock Model pairing with the MultipleInputs op declared above
// (a node consuming several inputs).
const Model* GetSimpleMultipleInputsModel();
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
// Builds a mock Model whose metadata carries an offline memory plan.
// NOTE(review): parameter semantics inferred from names — confirm in the
// .cc. `metadata_buffer` is the raw int32 plan (its leading entries
// presumably form the kOfflinePlannerHeaderSize header), `node_conn`
// describes `num_conns` operator connections, and `num_subgraph_inputs`
// optionally overrides how many tensors are treated as subgraph inputs
// (0 keeps the default behavior).
const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs = 0);
|
|
|
|
|
|
|
|
// Returns a mock Model containing the SimpleStatefulOp declared above.
const Model* GetSimpleStatefulModel();

// Builds a one-dimensional flatbuffer Tensor of length `size`.
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable = false);

// Builds a one-dimensional flatbuffer Tensor of length `size` carrying
// quantization metadata.
const Tensor* CreateQuantizedFlatbufferTensor(int size);

// Builds a flatbuffer Tensor whose quantization information is deliberately
// incomplete, for exercising error paths. The exact shape of the omission
// is defined in the .cc file.
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size);

// Creates the flatbuffer Buffer vector used when assembling test models.
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers();
|
|
|
|
|
|
|
|
// strcmp-style comparison for test code; 0 presumably means equal.
// NOTE(review): exact return contract is defined in the .cc — confirm
// before relying on the sign of the result.
int TestStrcmp(const char* a, const char* b);

// printf-style error reporter that can be installed on a TfLiteContext.
void ReportOpError(struct TfLiteContext* context, const char* format, ...);

// Populates `context` with `tensors_size` entries from `tensors` so kernels
// can be exercised without a full interpreter.
void PopulateContext(TfLiteTensor* tensors, int tensors_size,
                     TfLiteContext* context);

// Converts an int array into a TfLiteIntArray. NOTE(review): presumably the
// array is size-prefixed (int_array[0] holds the element count) — confirm
// against the .cc and existing callers.
TfLiteIntArray* IntArrayFromInts(const int* int_array);

// Float counterpart of IntArrayFromInts; presumably floats[0] holds the
// element count — confirm against the .cc.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats);
|
|
|
|
|
template <typename T> |
|
|
TfLiteTensor CreateTensor(const T* data, TfLiteIntArray* dims, |
|
|
const bool is_variable = false) { |
|
|
TfLiteTensor result; |
|
|
result.dims = dims; |
|
|
result.params = {}; |
|
|
result.quantization = {kTfLiteNoQuantization, nullptr}; |
|
|
result.is_variable = is_variable; |
|
|
result.allocation_type = kTfLiteMemNone; |
|
|
result.type = typeToTfLiteType<T>(); |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
result.data.data = const_cast<T*>(data); |
|
|
result.quantization = {kTfLiteAffineQuantization, nullptr}; |
|
|
result.bytes = ElementCount(*dims) * sizeof(T); |
|
|
return result; |
|
|
} |
|
|
|
|
|
template <typename T> |
|
|
TfLiteTensor CreateQuantizedTensor(const T* data, TfLiteIntArray* dims, |
|
|
const float scale, const int zero_point = 0, |
|
|
const bool is_variable = false) { |
|
|
TfLiteTensor result = CreateTensor(data, dims, is_variable); |
|
|
result.params = {scale, zero_point}; |
|
|
result.quantization = {kTfLiteAffineQuantization, nullptr}; |
|
|
return result; |
|
|
} |
|
|
|
|
|
template <typename T> |
|
|
TfLiteTensor CreateQuantizedTensor(const float* input, T* quantized, |
|
|
TfLiteIntArray* dims, float scale, |
|
|
int zero_point, bool is_variable = false) { |
|
|
int input_size = ElementCount(*dims); |
|
|
tflite::Quantize(input, quantized, input_size, scale, zero_point); |
|
|
return CreateQuantizedTensor(quantized, dims, scale, zero_point, is_variable); |
|
|
} |
|
|
|
|
|
// Quantizes float bias `data` into int32 storage and wraps it in a tensor.
// NOTE(review): by TFLite convention the bias scale is presumably
// input_scale * weights_scale — confirm in the .cc.
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale,
                                       bool is_variable = false);

// Per-channel variant: one weight scale per channel along
// `quantized_dimension`. Caller supplies the `scales`/`zero_points` arrays
// and the `affine_quant` struct that the returned tensor will point into,
// so all of them must outlive the tensor.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable = false);

// Symmetrically quantizes `input` per channel into int8 storage, filling
// `scales`/`zero_points`/`affine_quant` (caller-owned, must outlive the
// returned tensor).
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable = false);
|
|
|
|
|
|
|
|
// Returns the number of tensors declared by `model` (see the .cc for which
// subgraph(s) are counted).
size_t GetModelTensorCount(const Model* model);
|
|
|
|
|
|
|
|
// Computes the affine-quantization scale that maps the real interval
// [min, max] onto the full integer range of T.
template <typename T>
inline float ScaleFromMinMax(const float min, const float max) {
  // Widen to double (via the * 1.0 promotion) so max - min cannot overflow
  // T's integer arithmetic before the range is converted to float.
  const double quantized_range =
      (std::numeric_limits<T>::max() * 1.0) - std::numeric_limits<T>::min();
  return (max - min) / static_cast<float>(quantized_range);
}
|
|
|
|
|
|
|
|
template <typename T> |
|
|
inline int ZeroPointFromMinMax(const float min, const float max) { |
|
|
return static_cast<int>(std::numeric_limits<T>::min()) + |
|
|
static_cast<int>(-min / ScaleFromMinMax<T>(min, max) + 0.5f); |
|
|
} |
|
|
|
|
|
} |
|
|
} |
|
|
|
|
|
#endif |
|
|
|