|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
#include "edge-impulse-sdk/tensorflow/lite/micro/test_helpers.h" |
|
|
|
|
|
#include <cstdarg> |
|
|
#include <cstddef> |
|
|
#include <cstdint> |
|
|
#include <initializer_list> |
|
|
#include <new> |
|
|
|
|
|
#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/c/common.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/compatibility.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/kernels/kernel_util.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/micro/all_ops_resolver.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_utils.h" |
|
|
#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h" |
|
|
|
|
|
|
|
|
|
|
|
namespace tflite { |
|
|
namespace testing { |
|
|
namespace { |
|
|
|
|
|
class StackAllocator : public flatbuffers::Allocator { |
|
|
public: |
|
|
StackAllocator() : data_(data_backing_), data_size_(0) {} |
|
|
|
|
|
uint8_t* allocate(size_t size) override { |
|
|
TFLITE_DCHECK((data_size_ + size) <= kStackAllocatorSize); |
|
|
uint8_t* result = data_; |
|
|
data_ += size; |
|
|
data_size_ += size; |
|
|
return result; |
|
|
} |
|
|
|
|
|
void deallocate(uint8_t* p, size_t) override {} |
|
|
|
|
|
static StackAllocator& instance() { |
|
|
|
|
|
static char inst_memory[sizeof(StackAllocator)]; |
|
|
static StackAllocator* inst = new (inst_memory) StackAllocator; |
|
|
return *inst; |
|
|
} |
|
|
|
|
|
static constexpr size_t kStackAllocatorSize = 8192; |
|
|
|
|
|
private: |
|
|
uint8_t data_backing_[kStackAllocatorSize]; |
|
|
uint8_t* data_; |
|
|
int data_size_; |
|
|
}; |
|
|
|
|
|
// Returns the process-wide FlatBufferBuilder, lazily constructed in static
// storage via placement new (no heap, no exit-time destructor) and backed by
// the StackAllocator arena.
flatbuffers::FlatBufferBuilder* BuilderInstance() {
  static char builder_memory[sizeof(flatbuffers::FlatBufferBuilder)];
  static flatbuffers::FlatBufferBuilder* builder =
      new (builder_memory) flatbuffers::FlatBufferBuilder(
          StackAllocator::kStackAllocatorSize, &StackAllocator::instance());
  return builder;
}
|
|
|
|
|
|
|
|
// Helper for incrementally assembling a small flatbuffer Model in tests.
// Typical use: RegisterOp() -> AddTensor()/AddNode()/AddMetadata() ->
// BuildModel(). Backing storage is fixed-size arrays; the kMax* constants
// bound how much may be added.
class ModelBuilder {
 public:
  typedef int32_t Tensor;  // index into the subgraph's tensor table
  typedef int Operator;    // index into operator_codes_
  typedef int Node;        // index into operators_

  // Does not take ownership of `builder`; it must outlive this object.
  explicit ModelBuilder(flatbuffers::FlatBufferBuilder* builder)
      : builder_(builder) {}

  // Registers an operator code (builtin or custom) and returns its handle.
  Operator RegisterOp(BuiltinOperator op, const char* custom_code);

  // Adds a non-variable tensor of the given type and shape.
  Tensor AddTensor(TensorType type, std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, false, shape);
  }

  // Adds a variable (stateful) tensor of the given type and shape.
  Tensor AddVariableTensor(TensorType type,
                           std::initializer_list<int32_t> shape) {
    return AddTensorImpl(type, true, shape);
  }

  // Appends an operator node wiring the given tensor indices.
  Node AddNode(Operator op, std::initializer_list<Tensor> inputs,
               std::initializer_list<Tensor> outputs);

  // Attaches a named metadata entry backed by a buffer holding
  // `num_elements` int32 values (e.g. an offline memory plan).
  void AddMetadata(const char* description_string,
                   const int32_t* metadata_buffer_data, size_t num_elements);

  // Finalizes the flatbuffer and returns the root Model. If
  // num_subgraph_inputs is 0, all of `inputs` become subgraph inputs;
  // otherwise only the first num_subgraph_inputs of them do.
  const Model* BuildModel(std::initializer_list<Tensor> inputs,
                          std::initializer_list<Tensor> outputs,
                          size_t num_subgraph_inputs = 0);

 private:
  // Shared implementation for AddTensor/AddVariableTensor.
  Tensor AddTensorImpl(TensorType type, bool is_variable,
                       std::initializer_list<int32_t> shape);

  flatbuffers::FlatBufferBuilder* builder_;

  static constexpr int kMaxOperatorCodes = 10;
  flatbuffers::Offset<tflite::OperatorCode> operator_codes_[kMaxOperatorCodes];
  int next_operator_code_id_ = 0;  // next free slot in operator_codes_

  static constexpr int kMaxOperators = 50;
  flatbuffers::Offset<tflite::Operator> operators_[kMaxOperators];
  int next_operator_id_ = 0;  // next free slot in operators_

  static constexpr int kMaxTensors = 50;
  flatbuffers::Offset<tflite::Tensor> tensors_[kMaxTensors];

  static constexpr int kMaxMetadataBuffers = 10;

  static constexpr int kMaxMetadatas = 10;
  flatbuffers::Offset<Metadata> metadata_[kMaxMetadatas];

  flatbuffers::Offset<Buffer> metadata_buffers_[kMaxMetadataBuffers];

  // Counts entries in both metadata_ and metadata_buffers_ (kept in lockstep
  // by AddMetadata).
  int nbr_of_metadata_buffers_ = 0;

  int next_tensor_id_ = 0;  // next free slot in tensors_
};
|
|
|
|
|
// Registers an operator code and returns its index into operator_codes_,
// which is what Operator entries later reference.
ModelBuilder::Operator ModelBuilder::RegisterOp(BuiltinOperator op,
                                                const char* custom_code) {
  // Strict '<' (was '<='): next_operator_code_id_ is the index about to be
  // written, so allowing == kMaxOperatorCodes would write one past the array.
  TFLITE_DCHECK(next_operator_code_id_ < kMaxOperatorCodes);
  operator_codes_[next_operator_code_id_] = tflite::CreateOperatorCodeDirect(
      *builder_, 0, custom_code, 0, op);
  next_operator_code_id_++;
  return next_operator_code_id_ - 1;
}
|
|
|
|
|
// Appends an operator node referencing operator code `op` and the given
// tensor indices; returns the node's index in the subgraph.
ModelBuilder::Node ModelBuilder::AddNode(
    ModelBuilder::Operator op,
    std::initializer_list<ModelBuilder::Tensor> inputs,
    std::initializer_list<ModelBuilder::Tensor> outputs) {
  // Strict '<' (was '<='): next_operator_id_ is about to be used as a write
  // index, so == kMaxOperators would overflow operators_.
  TFLITE_DCHECK(next_operator_id_ < kMaxOperators);
  operators_[next_operator_id_] = tflite::CreateOperator(
      *builder_, op, builder_->CreateVector(inputs.begin(), inputs.size()),
      builder_->CreateVector(outputs.begin(), outputs.size()),
      BuiltinOptions_NONE);
  next_operator_id_++;
  return next_operator_id_ - 1;
}
|
|
|
|
|
void ModelBuilder::AddMetadata(const char* description_string, |
|
|
const int32_t* metadata_buffer_data, |
|
|
size_t num_elements) { |
|
|
metadata_[ModelBuilder::nbr_of_metadata_buffers_] = |
|
|
CreateMetadata(*builder_, builder_->CreateString(description_string), |
|
|
1 + ModelBuilder::nbr_of_metadata_buffers_); |
|
|
|
|
|
metadata_buffers_[nbr_of_metadata_buffers_] = tflite::CreateBuffer( |
|
|
*builder_, builder_->CreateVector((uint8_t*)metadata_buffer_data, |
|
|
sizeof(uint32_t) * num_elements)); |
|
|
|
|
|
ModelBuilder::nbr_of_metadata_buffers_++; |
|
|
} |
|
|
|
|
|
// Finalizes the model: one subgraph, the registered operator codes, the
// buffer table (slot 0 is the mandatory empty buffer, followed by any
// metadata buffers) and, if present, the metadata table.
const Model* ModelBuilder::BuildModel(
    std::initializer_list<ModelBuilder::Tensor> inputs,
    std::initializer_list<ModelBuilder::Tensor> outputs,
    size_t num_subgraph_inputs) {
  // Up to kMaxMetadataBuffers metadata buffers PLUS the placeholder at
  // index 0, so the local array needs kMaxMetadataBuffers + 1 slots (the
  // previous size of kMaxMetadataBuffers overflowed when full).
  size_t buffer_size = 1 + ModelBuilder::nbr_of_metadata_buffers_;
  flatbuffers::Offset<Buffer> buffers[kMaxMetadataBuffers + 1];
  buffers[0] = tflite::CreateBuffer(*builder_);
  for (int i = 1; i < ModelBuilder::nbr_of_metadata_buffers_ + 1; ++i) {
    buffers[i] = metadata_buffers_[i - 1];
  }

  constexpr size_t subgraphs_size = 1;

  // Default: every tensor listed in `inputs` becomes a subgraph input.
  // A caller may ask for fewer, in which case only the leading
  // num_subgraph_inputs entries are exposed as subgraph inputs.
  if (num_subgraph_inputs == 0) {
    num_subgraph_inputs = inputs.size();
  } else {
    TFLITE_DCHECK(num_subgraph_inputs <= inputs.size());
  }

  const flatbuffers::Offset<SubGraph> subgraphs[subgraphs_size] = {
      tflite::CreateSubGraph(
          *builder_, builder_->CreateVector(tensors_, next_tensor_id_),
          builder_->CreateVector(inputs.begin(), num_subgraph_inputs),
          builder_->CreateVector(outputs.begin(), outputs.size()),
          builder_->CreateVector(operators_, next_operator_id_),
          builder_->CreateString("test_subgraph"))};

  // The metadata vector may only be written when non-empty, hence the two
  // CreateModel variants. (The "teset_model" description string is kept
  // byte-identical to the original/upstream helper.)
  flatbuffers::Offset<Model> model_offset;
  if (ModelBuilder::nbr_of_metadata_buffers_ > 0) {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("teset_model"),
        builder_->CreateVector(buffers, buffer_size), 0,
        builder_->CreateVector(metadata_,
                               ModelBuilder::nbr_of_metadata_buffers_));
  } else {
    model_offset = tflite::CreateModel(
        *builder_, 0,
        builder_->CreateVector(operator_codes_, next_operator_code_id_),
        builder_->CreateVector(subgraphs, subgraphs_size),
        builder_->CreateString("teset_model"),
        builder_->CreateVector(buffers, buffer_size));
  }

  tflite::FinishModelBuffer(*builder_, model_offset);
  void* model_pointer = builder_->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}
|
|
|
|
|
// Shared implementation for AddTensor/AddVariableTensor: serializes one
// Tensor table (buffer 0, no name, no quantization) and returns its index.
ModelBuilder::Tensor ModelBuilder::AddTensorImpl(
    TensorType type, bool is_variable, std::initializer_list<int32_t> shape) {
  // Strict '<' (was '<='): next_tensor_id_ is about to be used as a write
  // index, so == kMaxTensors would overflow tensors_.
  TFLITE_DCHECK(next_tensor_id_ < kMaxTensors);
  tensors_[next_tensor_id_] = tflite::CreateTensor(
      *builder_, builder_->CreateVector(shape.begin(), shape.size()), type,
      0, 0, 0,
      is_variable,
      0);
  next_tensor_id_++;
  return next_tensor_id_ - 1;
}
|
|
|
|
|
// Builds the single-node model used to exercise the "simple_stateful_op"
// custom kernel: one uint8 input feeding a node with two outputs (a median
// tensor and an int32 invoke-counter tensor).
const Model* BuildSimpleStatefulModel() {
  ModelBuilder builder(BuilderInstance());

  const int op =
      builder.RegisterOp(BuiltinOperator_CUSTOM, "simple_stateful_op");
  const int input = builder.AddTensor(TensorType_UINT8, {3});
  const int median = builder.AddTensor(TensorType_UINT8, {3});
  const int invoke_count = builder.AddTensor(TensorType_INT32, {1});

  builder.AddNode(op, {input}, {median, invoke_count});
  return builder.BuildModel({input}, {median, invoke_count});
}
|
|
|
|
|
// Builds a diamond-shaped graph of "mock_custom" nodes:
//   t0 -> node0 -> t1 --\
//                        +-> node2 -> t3
//   t0 -> node1 -> t2 --/
const Model* BuildSimpleModelWithBranch() {
  ModelBuilder builder(BuilderInstance());

  const int op = builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");
  int t[4];
  for (int& tensor_id : t) {
    tensor_id = builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  }
  builder.AddNode(op, {t[0]}, {t[1]});
  builder.AddNode(op, {t[0]}, {t[2]});
  builder.AddNode(op, {t[1], t[2]}, {t[3]});
  return builder.BuildModel({t[0]}, {t[3]});
}
|
|
|
|
|
// Builds a chain of "mock_custom" nodes whose metadata carries an
// "OfflineMemoryAllocation" plan, for exercising the offline-planner path.
const Model* BuildModelWithOfflinePlanning(int number_of_tensors,
                                           const int32_t* metadata_buffer,
                                           NodeConnection* node_conn,
                                           int num_conns,
                                           int num_subgraph_inputs) {
  ModelBuilder builder(BuilderInstance());

  const int op = builder.RegisterOp(BuiltinOperator_CUSTOM, "mock_custom");

  for (int t = 0; t < number_of_tensors; ++t) {
    builder.AddTensor(TensorType_FLOAT32, {2, 2, 3});
  }

  for (int c = 0; c < num_conns; ++c) {
    builder.AddNode(op, node_conn[c].input, node_conn[c].output);
  }

  // The metadata buffer holds the plan header followed by one entry per
  // tensor.
  builder.AddMetadata(
      "OfflineMemoryAllocation", metadata_buffer,
      number_of_tensors + tflite::testing::kOfflinePlannerHeaderSize);

  // Graph inputs are the first node's inputs; graph outputs the last node's.
  return builder.BuildModel(node_conn[0].input,
                            node_conn[num_conns - 1].output,
                            num_subgraph_inputs);
}
|
|
|
|
|
// Hand-assembles the two-node "mock_custom" model used by many interpreter
// tests: input (t0) + weight (t1, constant buffer) feed two nodes producing
// outputs t2 and t3.
const Model* BuildSimpleMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  // Buffer 0: mandatory empty buffer. Buffer 1: the weight byte {21}.
  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data[buffer_data_size] = {21};
  constexpr size_t buffers_size = 2;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data, buffer_data_size))};
  // All tensors are scalar-shaped ({1}); the weight tensor references
  // buffer 1, all others buffer 0.
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_UINT8, 1,
                   builder->CreateString("test_weight_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output2_tensor"), 0, false),
  };
  // Subgraph I/O: input t0; outputs t2 and t3.
  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 2;
  const int32_t outputs[outputs_size] = {2, 3};
  // Both operators read {t0, t1}; they write t2 and t3 respectively.
  constexpr size_t operator_inputs_size = 2;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {2};
  const int32_t operator2_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 2;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator2_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  // Single operator code: the "mock_custom" custom op.
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, 0,
                               "mock_custom",
                               0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}
|
|
|
|
|
// Hand-assembles a three-node chain of "mock_custom" ops. Each node reads
// the previous activation, one variable tensor, and one weight tensor:
//   {t0,t1,t2} -> t3, {t3,t4,t5} -> t6, {t6,t7,t8} -> t9.
const Model* BuildComplexMockModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  constexpr size_t buffer_data_size = 1;
  const uint8_t buffer_data_1[buffer_data_size] = {21};
  const uint8_t buffer_data_2[buffer_data_size] = {21};
  const uint8_t buffer_data_3[buffer_data_size] = {21};
  // Buffer 0 is the mandatory empty buffer; buffers 2, 4 and 6 carry the
  // one-byte weight payloads, the rest are empty.
  constexpr size_t buffers_size = 7;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_1, buffer_data_size)),
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_2, buffer_data_size)),
      CreateBuffer(*builder),
      CreateBuffer(*builder,
                   builder->CreateVector(buffer_data_3, buffer_data_size)),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};

  // 10 scalar tensors: per node one activation input/output plus one
  // variable tensor (is_variable == true, buffer 1) and one uint8 weight
  // tensor (buffer 2).
  constexpr size_t tensors_size = 10;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_input_tensor_1"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_1"),
          0, true),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_1"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_1"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_2"),
          0, true),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_2"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_2"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 1, builder->CreateString("test_variable_tensor_3"),
          0, true),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_UINT8, 2, builder->CreateString("test_weight_tensor_3"), 0,
          false),
      CreateTensor(
          *builder, builder->CreateVector(tensor_shape, tensor_shape_size),
          TensorType_INT32, 0, builder->CreateString("test_output_tensor_3"), 0,
          false),
  };

  constexpr size_t operators_size = 3;
  Offset<Operator> operators[operators_size];
  {
    // Node 0: {t0, t1, t2} -> t3.
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {3};

    operators[0] = {CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE)};
  }

  {
    // Node 1: {t3, t4, t5} -> t6.
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {3, 4, 5};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {6};

    operators[1] = {CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE)};
  }

  {
    // Node 2: {t6, t7, t8} -> t9.
    constexpr size_t operator_inputs_size = 3;
    const int32_t operator_inputs[operator_inputs_size] = {6, 7, 8};
    constexpr size_t operator_outputs_size = 1;
    const int32_t operator_outputs[operator_outputs_size] = {9};

    operators[2] = {CreateOperator(
        *builder, 0,
        builder->CreateVector(operator_inputs, operator_inputs_size),
        builder->CreateVector(operator_outputs, operator_outputs_size),
        BuiltinOptions_NONE)};
  }

  // Subgraph input is t0; output is the final activation t9.
  constexpr size_t inputs_size = 1;
  const int32_t inputs[inputs_size] = {0};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {9};

  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};

  // Single operator code: the "mock_custom" custom op.
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, 0,
                               "mock_custom",
                               0, BuiltinOperator_CUSTOM)};

  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));

  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}
|
|
|
|
|
// Hand-assembles the one-node "multiple_inputs_op" model: three scalar
// inputs (t0 int32, t1 int8, t2 int32) feed a single node producing t3.
const Model* BuildSimpleMultipleInputsModel() {
  using flatbuffers::Offset;
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();

  // Only the mandatory empty buffer; every tensor references it (index 0).
  constexpr size_t buffers_size = 1;
  const Offset<Buffer> buffers[buffers_size] = {
      CreateBuffer(*builder),
  };
  constexpr size_t tensor_shape_size = 1;
  const int32_t tensor_shape[tensor_shape_size] = {1};
  constexpr size_t tensors_size = 4;
  const Offset<Tensor> tensors[tensors_size] = {
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor1"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT8, 0,
                   builder->CreateString("test_input_tensor2"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_input_tensor3"), 0, false),
      CreateTensor(*builder,
                   builder->CreateVector(tensor_shape, tensor_shape_size),
                   TensorType_INT32, 0,
                   builder->CreateString("test_output_tensor"), 0, false),
  };
  // Subgraph I/O and node I/O coincide: inputs {t0, t1, t2}, output {t3}.
  constexpr size_t inputs_size = 3;
  const int32_t inputs[inputs_size] = {0, 1, 2};
  constexpr size_t outputs_size = 1;
  const int32_t outputs[outputs_size] = {3};
  constexpr size_t operator_inputs_size = 3;
  const int32_t operator_inputs[operator_inputs_size] = {0, 1, 2};
  constexpr size_t operator_outputs_size = 1;
  const int32_t operator_outputs[operator_outputs_size] = {3};
  constexpr size_t operators_size = 1;
  const Offset<Operator> operators[operators_size] = {
      CreateOperator(
          *builder, 0,
          builder->CreateVector(operator_inputs, operator_inputs_size),
          builder->CreateVector(operator_outputs, operator_outputs_size),
          BuiltinOptions_NONE),
  };
  constexpr size_t subgraphs_size = 1;
  const Offset<SubGraph> subgraphs[subgraphs_size] = {
      CreateSubGraph(*builder, builder->CreateVector(tensors, tensors_size),
                     builder->CreateVector(inputs, inputs_size),
                     builder->CreateVector(outputs, outputs_size),
                     builder->CreateVector(operators, operators_size),
                     builder->CreateString("test_subgraph"))};
  // Single operator code: the "multiple_inputs_op" custom op.
  constexpr size_t operator_codes_size = 1;
  const Offset<OperatorCode> operator_codes[operator_codes_size] = {
      CreateOperatorCodeDirect(*builder, 0,
                               "multiple_inputs_op",
                               0, BuiltinOperator_CUSTOM)};
  const Offset<Model> model_offset = CreateModel(
      *builder, 0, builder->CreateVector(operator_codes, operator_codes_size),
      builder->CreateVector(subgraphs, subgraphs_size),
      builder->CreateString("test_model"),
      builder->CreateVector(buffers, buffers_size));
  FinishModelBuffer(*builder, model_offset);
  void* model_pointer = builder->GetBufferPointer();
  const Model* model = flatbuffers::GetRoot<Model>(model_pointer);
  return model;
}
|
|
|
|
|
} |
|
|
|
|
|
// Const view of the shared registration; delegates to the mutable accessor.
const TfLiteRegistration* SimpleStatefulOp::getRegistration() {
  const TfLiteRegistration* registration = GetMutableRegistration();
  return registration;
}
|
|
|
|
|
// Returns the single shared registration for this op, wiring its lifecycle
// callbacks on every call (idempotent).
TfLiteRegistration* SimpleStatefulOp::GetMutableRegistration() {
  static TfLiteRegistration registration;
  registration.init = Init;
  registration.prepare = Prepare;
  registration.invoke = Invoke;
  return &registration;
}
|
|
|
|
|
// Allocates and zero-initializes per-op state; returned pointer becomes
// node->user_data.
void* SimpleStatefulOp::Init(TfLiteContext* context, const char* buffer,
                             size_t length) {
  // During Init only AllocatePersistentBuffer may be available; the
  // eval/scratch-buffer entry points must not be populated yet.
  TFLITE_DCHECK(context->AllocateBufferForEval == nullptr);
  TFLITE_DCHECK(context->GetScratchBuffer == nullptr);
  TFLITE_DCHECK(context->RequestScratchBufferInArena == nullptr);

  void* raw = context->AllocatePersistentBuffer(context, sizeof(OpData));
  OpData* data = reinterpret_cast<OpData*>(raw);
  *data = {};  // value-initialize all fields of the op state
  return raw;
}
|
|
|
|
|
// Validates the input and reserves the buffers Invoke needs: a scratch
// buffer for sorting and a persistent invocation counter.
TfLiteStatus SimpleStatefulOp::Prepare(TfLiteContext* context,
                                       TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // Input must be a non-empty uint8 tensor.
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  if (input->type != kTfLiteUInt8) return kTfLiteError;
  if (NumElements(input->dims) == 0) return kTfLiteError;

  // One scratch byte per input element; Invoke copies the input here and
  // sorts it in place.
  TF_LITE_ENSURE_STATUS(context->RequestScratchBufferInArena(
      context, sizeof(uint8_t) * NumElements(input->dims),
      &data->sorting_buffer));

  // Persistent counter that survives across invocations; reset here so each
  // Prepare starts counting from zero.
  data->invoke_count = reinterpret_cast<int*>(
      context->AllocatePersistentBuffer(context, sizeof(int)));
  *data->invoke_count = 0;

  return kTfLiteOk;
}
|
|
|
|
|
// Sorts a copy of the uint8 input in scratch memory, then writes the median
// element to output 0 and the running invocation count to output 1.
TfLiteStatus SimpleStatefulOp::Invoke(TfLiteContext* context,
                                      TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  *data->invoke_count += 1;

  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
  const uint8_t* input_data = GetTensorData<uint8_t>(input);
  int size = NumElements(input->dims);

  // Scratch buffer requested in Prepare; sized to one byte per element.
  uint8_t* sorting_buffer = reinterpret_cast<uint8_t*>(
      context->GetScratchBuffer(context, data->sorting_buffer));

  // Copy the input so sorting does not mutate the input tensor.
  for (int i = 0; i < size; i++) {
    sorting_buffer[i] = input_data[i];
  }

  // In-place insertion sort of the scratch copy.
  for (int i = 1; i < size; i++) {
    for (int j = i; j > 0 && sorting_buffer[j] < sorting_buffer[j - 1]; j--) {
      std::swap(sorting_buffer[j], sorting_buffer[j - 1]);
    }
  }

  TfLiteTensor* median;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kMedianTensor, &median));
  uint8_t* median_data = GetTensorData<uint8_t>(median);
  TfLiteTensor* invoke_count;
  TF_LITE_ENSURE_OK(context,
                    GetOutputSafe(context, node, kInvokeCount, &invoke_count));
  int32_t* invoke_count_data = GetTensorData<int32_t>(invoke_count);

  // Middle element of the sorted copy; count of Invoke calls so far.
  median_data[0] = sorting_buffer[size / 2];
  invoke_count_data[0] = *data->invoke_count;
  return kTfLiteOk;
}
|
|
|
|
|
// Const view of the shared registration; delegates to the mutable accessor.
const TfLiteRegistration* MockCustom::getRegistration() {
  const TfLiteRegistration* registration = GetMutableRegistration();
  return registration;
}
|
|
|
|
|
// Returns the single shared registration for this op, wiring its lifecycle
// callbacks (including free) on every call (idempotent).
TfLiteRegistration* MockCustom::GetMutableRegistration() {
  static TfLiteRegistration registration;
  registration.init = Init;
  registration.prepare = Prepare;
  registration.invoke = Invoke;
  registration.free = Free;
  return &registration;
}
|
|
|
|
|
// No per-instance state; also resets the test-visible freed_ flag.
void* MockCustom::Init(TfLiteContext* context, const char* buffer,
                       size_t length) {
  // Delegate-replacement must not be wired up in this context.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;

  return nullptr;  // no user_data for this op
}
|
|
|
|
|
// Records that the framework invoked the free callback, for test assertions.
void MockCustom::Free(TfLiteContext* context, void* buffer) {
  freed_ = true;
}
|
|
|
|
|
// Nothing to validate or allocate for this mock op.
TfLiteStatus MockCustom::Prepare(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; }
|
|
|
|
|
// Writes output[0] = input0[0] (int32) + input1[0] (uint8 weight).
TfLiteStatus MockCustom::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* weight;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &weight));
  const uint8_t* weight_data = weight->data.uint8;
  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Single assignment (the previous `output_data[0] = 0;` was a dead store
  // immediately overwritten).
  output_data[0] = input_data[0] + weight_data[0];
  return kTfLiteOk;
}
|
|
|
|
|
// Test-visible flag: set by Free(), cleared by Init().
bool MockCustom::freed_ = false;
|
|
|
|
|
// Const view of the shared registration; delegates to the mutable accessor.
const TfLiteRegistration* MultipleInputs::getRegistration() {
  const TfLiteRegistration* registration = GetMutableRegistration();
  return registration;
}
|
|
|
|
|
// Returns the single shared registration for this op, wiring its lifecycle
// callbacks (including free) on every call (idempotent).
TfLiteRegistration* MultipleInputs::GetMutableRegistration() {
  static TfLiteRegistration registration;
  registration.init = Init;
  registration.prepare = Prepare;
  registration.invoke = Invoke;
  registration.free = Free;
  return &registration;
}
|
|
|
|
|
// No per-instance state; also resets the test-visible freed_ flag.
void* MultipleInputs::Init(TfLiteContext* context, const char* buffer,
                           size_t length) {
  // Delegate-replacement must not be wired up in this context.
  TFLITE_DCHECK(context->ReplaceNodeSubsetsWithDelegateKernels == nullptr);
  freed_ = false;

  return nullptr;  // no user_data for this op
}
|
|
|
|
|
// Records that the framework invoked the free callback, for test assertions.
void MultipleInputs::Free(TfLiteContext* context, void* buffer) { freed_ = true; }
|
|
|
|
|
// Nothing to validate or allocate for this mock op.
TfLiteStatus MultipleInputs::Prepare(TfLiteContext* context, TfLiteNode* node) { return kTfLiteOk; }
|
|
|
|
|
// Writes output[0] = input0[0] + input1[0] + input2[0] (all read as int32).
TfLiteStatus MultipleInputs::Invoke(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 0, &input));
  const int32_t* input_data = input->data.i32;
  const TfLiteTensor* input1;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 1, &input1));
  const int32_t* input_data1 = input1->data.i32;
  const TfLiteTensor* input2;
  TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, 2, &input2));
  const int32_t* input_data2 = input2->data.i32;

  TfLiteTensor* output;
  TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, 0, &output));
  int32_t* output_data = output->data.i32;
  // Single assignment (the previous `output_data[0] = 0;` was a dead store
  // immediately overwritten).
  output_data[0] = input_data[0] + input_data1[0] + input_data2[0];
  return kTfLiteOk;
}
|
|
|
|
|
// Test-visible flag: set by Free(), cleared by Init().
bool MultipleInputs::freed_ = false;
|
|
|
|
|
// Returns an op resolver preloaded with the three custom test kernels
// referenced by the mock models in this file.
AllOpsResolver GetOpResolver() {
  AllOpsResolver resolver;
  resolver.AddCustom("mock_custom", MockCustom::GetMutableRegistration());
  resolver.AddCustom("simple_stateful_op",
                     SimpleStatefulOp::GetMutableRegistration());
  resolver.AddCustom("multiple_inputs_op",
                     MultipleInputs::GetMutableRegistration());
  return resolver;
}
|
|
|
|
|
// Lazily builds the simple mock model once and caches it for the rest of
// the test run.
const Model* GetSimpleMockModel() {
  static const Model* cached_model = nullptr;
  if (cached_model == nullptr) {
    cached_model = BuildSimpleMockModel();
  }
  return cached_model;
}
|
|
|
|
|
// Lazily builds the multiple-inputs model once and caches it for the rest
// of the test run.
const Model* GetSimpleMultipleInputsModel() {
  static const Model* cached_model = nullptr;
  if (cached_model == nullptr) {
    cached_model = BuildSimpleMultipleInputsModel();
  }
  return cached_model;
}
|
|
|
|
|
// Lazily builds the complex mock model once and caches it for the rest of
// the test run.
const Model* GetComplexMockModel() {
  static const Model* cached_model = nullptr;
  if (cached_model == nullptr) {
    cached_model = BuildComplexMockModel();
  }
  return cached_model;
}
|
|
|
|
|
// Lazily builds the branching mock model once and caches it for the rest of
// the test run.
const Model* GetSimpleModelWithBranch() {
  static const Model* cached_model = nullptr;
  if (cached_model == nullptr) {
    cached_model = BuildSimpleModelWithBranch();
  }
  return cached_model;
}
|
|
|
|
|
// Thin forwarding wrapper. Unlike the other getters this does NOT cache:
// the offline-planning parameters differ between tests, so each call builds
// a fresh model.
const Model* GetModelWithOfflinePlanning(int num_tensors,
                                         const int32_t* metadata_buffer,
                                         NodeConnection* node_conn,
                                         int num_conns,
                                         int num_subgraph_inputs) {
  return BuildModelWithOfflinePlanning(num_tensors, metadata_buffer, node_conn,
                                       num_conns, num_subgraph_inputs);
}
|
|
|
|
|
// Lazily builds the stateful-op model once and caches it for the rest of
// the test run.
const Model* GetSimpleStatefulModel() {
  static const Model* cached_model = nullptr;
  if (cached_model == nullptr) {
    cached_model = BuildSimpleStatefulModel();
  }
  return cached_model;
}
|
|
|
|
|
// Serializes a rank-1 INT32 Tensor table of the given length into the shared
// builder's buffer and returns a pointer to its root.
const Tensor* Create1dFlatbufferTensor(int size, bool is_variable) {
  flatbuffers::FlatBufferBuilder* builder = BuilderInstance();
  const int32_t shape[] = {size};
  const flatbuffers::Offset<Tensor> tensor_offset = CreateTensor(
      *builder, builder->CreateVector(shape, 1), TensorType_INT32, 0,
      builder->CreateString("test_tensor"), 0,
      is_variable);
  builder->Finish(tensor_offset);
  return flatbuffers::GetRoot<Tensor>(builder->GetBufferPointer());
}
|
|
|
|
|
// Serializes a rank-1 INT32 Tensor table with shape {size} carrying fixed
// per-tensor quantization data ({0.1f}, {0.2f}, {0.3f}, {100}; presumably
// min/max/scale/zero-point in schema order -- confirm against
// schema_generated.h). Returns a pointer into the shared builder's buffer,
// valid only until that builder is reused.
const Tensor* CreateQuantizedFlatbufferTensor(int size) {
  flatbuffers::FlatBufferBuilder* fbb = BuilderInstance();
  const flatbuffers::Offset<QuantizationParameters> quant_params =
      CreateQuantizationParameters(
          *fbb, fbb->CreateVector<float>({0.1f}),
          fbb->CreateVector<float>({0.2f}), fbb->CreateVector<float>({0.3f}),
          fbb->CreateVector<int64_t>({100ll}));

  const int32_t shape[] = {size};
  const flatbuffers::Offset<Tensor> tensor_offset =
      CreateTensor(*fbb, fbb->CreateVector(shape, 1), TensorType_INT32,
                   /*buffer=*/0, fbb->CreateString("test_tensor"), quant_params,
                   /*is_variable=*/false);
  fbb->Finish(tensor_offset);
  return flatbuffers::GetRoot<Tensor>(fbb->GetBufferPointer());
}
|
|
|
|
|
// Serializes a rank-1 INT32 Tensor table whose QuantizationParameters
// table is present but has every field zeroed/empty, to exercise code
// paths that must cope with missing quantization data. Returns a pointer
// into the shared builder's buffer, valid only until that builder is
// reused.
const Tensor* CreateMissingQuantizationFlatbufferTensor(int size) {
  flatbuffers::FlatBufferBuilder* fbb = BuilderInstance();
  const flatbuffers::Offset<QuantizationParameters> empty_quant =
      CreateQuantizationParameters(*fbb, 0, 0, 0, 0, QuantizationDetails_NONE,
                                   0, 0);
  const int32_t shape[] = {size};
  const flatbuffers::Offset<Tensor> tensor_offset =
      CreateTensor(*fbb, fbb->CreateVector(shape, 1), TensorType_INT32,
                   /*buffer=*/0, fbb->CreateString("test_tensor"), empty_quant,
                   /*is_variable=*/false);
  fbb->Finish(tensor_offset);
  return flatbuffers::GetRoot<Tensor>(fbb->GetBufferPointer());
}
|
|
|
|
|
// Serializes a vector containing one empty Buffer table into the shared
// builder and returns a pointer into that builder's buffer (valid only
// until the builder is reused).
const flatbuffers::Vector<flatbuffers::Offset<Buffer>>*
CreateFlatbufferBuffers() {
  using BufferVector = flatbuffers::Vector<flatbuffers::Offset<Buffer>>;
  flatbuffers::FlatBufferBuilder* fbb = BuilderInstance();
  const flatbuffers::Offset<Buffer> buffers[] = {CreateBuffer(*fbb)};
  const flatbuffers::Offset<BufferVector> buffers_offset =
      fbb->CreateVector(buffers, 1);
  fbb->Finish(buffers_offset);
  return flatbuffers::GetRoot<BufferVector>(fbb->GetBufferPointer());
}
|
|
|
|
|
// strcmp-alike for tests that additionally tolerates null arguments.
// Returns -1 if either pointer is null; otherwise negative/zero/positive
// as `a` compares below/equal/above `b` (bytes compared as unsigned char,
// matching standard strcmp semantics).
int TestStrcmp(const char* a, const char* b) {
  if (a == nullptr || b == nullptr) {
    return -1;
  }
  for (; *a != '\0' && *a == *b; ++a, ++b) {
  }
  return static_cast<unsigned char>(*a) - static_cast<unsigned char>(*b);
}
|
|
|
|
|
|
|
|
// TfLiteContext::ReportError hook used by the test harness: forwards a
// printf-style message to the ErrorReporter stashed in context->impl_.
// Compiles to a no-op when error strings are stripped from the build.
void ReportOpError(struct TfLiteContext* context, const char* format, ...) {
#ifndef TF_LITE_STRIP_ERROR_STRINGS
  // Test setup stores an ErrorReporter* in the context's opaque impl_ slot.
  ErrorReporter* error_reporter = static_cast<ErrorReporter*>(context->impl_);
  va_list args;
  va_start(args, format);
  // NOTE(review): passes the va_list through TF_LITE_REPORT_ERROR --
  // presumably this resolves to ErrorReporter::Report(format, va_list);
  // confirm against the macro's definition in error_reporter.h.
  TF_LITE_REPORT_ERROR(error_reporter, format, args);
  va_end(args);
#endif
}
|
|
|
|
|
|
|
|
|
|
|
// Reinterprets a caller-provided int array as a TfLiteIntArray. Assumes
// TfLiteIntArray's layout is {int size; int data[];} -- so int_array[0]
// must already hold the element count and the remaining entries the data.
// No copy is made; the result aliases the caller's storage.
TfLiteIntArray* IntArrayFromInts(const int* int_array) {
  int* mutable_ints = const_cast<int*>(int_array);
  return reinterpret_cast<TfLiteIntArray*>(mutable_ints);
}
|
|
|
|
|
|
|
|
|
|
|
// Reinterprets a caller-provided float array as a TfLiteFloatArray.
// floats[0] must hold the element count (as a float); it is rewritten IN
// PLACE as an int so the storage matches TfLiteFloatArray's leading int
// size field, with the remaining entries left as the float payload.
// NOTE(review): despite the const parameter this writes through the
// pointer, so the caller's array must live in mutable storage; the result
// aliases that storage rather than copying it.
TfLiteFloatArray* FloatArrayFromFloats(const float* floats) {
  static_assert(sizeof(float) == sizeof(int),
                "assumes sizeof(float) == sizeof(int) to perform casting");
  // Read the count while floats[0] is still a float, then overwrite that
  // slot with the same count stored as an int.
  int size = static_cast<int>(floats[0]);
  *reinterpret_cast<int32_t*>(const_cast<float*>(floats)) = size;
  return reinterpret_cast<TfLiteFloatArray*>(const_cast<float*>(floats));
}
|
|
|
|
|
// Symmetrically quantizes `data` into `quantized` using the bias scale
// implied by the producing layers (bias_scale = input_scale *
// weights_scale, zero point 0) and wraps the result in a TfLiteTensor.
TfLiteTensor CreateQuantizedBiasTensor(const float* data, int32_t* quantized,
                                       TfLiteIntArray* dims, float input_scale,
                                       float weights_scale, bool is_variable) {
  const float bias_scale = input_scale * weights_scale;
  tflite::SymmetricQuantize(data, quantized, ElementCount(*dims), bias_scale);
  return CreateQuantizedTensor(quantized, dims, bias_scale, /*zero_point=*/0,
                               is_variable);
}
|
|
|
|
|
|
|
|
|
|
|
// Per-channel symmetric quantization of a bias tensor. Each channel's
// scale is input_scale * weight_scales[channel]; zero points are all 0.
// The caller supplies `scales` and `zero_points` arrays with room for
// num_channels + 1 entries, and `affine_quant` is filled in to alias them.
TfLiteTensor CreatePerChannelQuantizedBiasTensor(
    const float* input, int32_t* quantized, TfLiteIntArray* dims,
    float input_scale, float* weight_scales, float* scales, int* zero_points,
    TfLiteAffineQuantization* affine_quant, int quantized_dimension,
    bool is_variable) {
  const int num_elements = ElementCount(*dims);
  const int num_channels = dims->data[quantized_dimension];

  // Slot 0 of both arrays carries the length, matching the layout that
  // FloatArrayFromFloats / IntArrayFromInts expect.
  scales[0] = static_cast<float>(num_channels);
  zero_points[0] = num_channels;
  float* per_channel_scales = &scales[1];
  for (int channel = 0; channel < num_channels; ++channel) {
    per_channel_scales[channel] = input_scale * weight_scales[channel];
    zero_points[channel + 1] = 0;
  }

  SymmetricPerChannelQuantize<int32_t>(input, quantized, num_elements,
                                       num_channels, per_channel_scales);

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor tensor = CreateTensor(quantized, dims, is_variable);
  tensor.quantization = {kTfLiteAffineQuantization, affine_quant};
  return tensor;
}
|
|
|
|
|
// Per-channel symmetric int8 quantization of `input`. The quantizer
// computes one scale per channel into scales[1..num_channels]; all zero
// points are 0. The caller supplies `scales` and `zero_points` arrays
// with room for num_channels + 1 entries, and `affine_quant` is filled in
// to alias them.
TfLiteTensor CreateSymmetricPerChannelQuantizedTensor(
    const float* input, int8_t* quantized, TfLiteIntArray* dims, float* scales,
    int* zero_points, TfLiteAffineQuantization* affine_quant,
    int quantized_dimension, bool is_variable) {
  const int num_channels = dims->data[quantized_dimension];
  // Slot 0 of both arrays carries the length, matching the layout that
  // FloatArrayFromFloats / IntArrayFromInts expect.
  scales[0] = static_cast<float>(num_channels);
  zero_points[0] = num_channels;

  SignedSymmetricPerChannelQuantize(input, dims, quantized_dimension, quantized,
                                    &scales[1]);

  for (int channel = 0; channel < num_channels; ++channel) {
    zero_points[channel + 1] = 0;
  }

  affine_quant->scale = FloatArrayFromFloats(scales);
  affine_quant->zero_point = IntArrayFromInts(zero_points);
  affine_quant->quantized_dimension = quantized_dimension;

  TfLiteTensor tensor = CreateTensor(quantized, dims, is_variable);
  tensor.quantization = {kTfLiteAffineQuantization, affine_quant};
  return tensor;
}
|
|
|
|
|
size_t GetModelTensorCount(const Model* model) { |
|
|
auto* subgraphs = model->subgraphs(); |
|
|
if (subgraphs) { |
|
|
return (*subgraphs)[0]->tensors()->size(); |
|
|
} |
|
|
return 0; |
|
|
} |
|
|
|
|
|
} |
|
|
} |
|
|
|