#ifndef TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_
#define TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_

#include <cstddef>
#include <cstdint>

#include "edge-impulse-sdk/third_party/flatbuffers/include/flatbuffers/flatbuffers.h"
#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/core/api/error_reporter.h"
#include "edge-impulse-sdk/tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_allocator.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_op_resolver.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_profiler.h"
#include "edge-impulse-sdk/tensorflow/lite/portable_type_to_tflitetype.h"
#include "edge-impulse-sdk/tensorflow/lite/schema/schema_generated.h"

// Copied from tensorflow/lite/version.h to avoid a dependency chain into
// tensorflow/core.
#define TFLITE_SCHEMA_VERSION (3)

namespace tflite {

namespace internal {

// A helper class to encapsulate the implementation of APIs in Context.
// context->impl_ points to an instance of this class.
// Check tensorflow/lite/c/common.h for detailed descriptions.
class ContextHelper {
 public:
  explicit ContextHelper(ErrorReporter* error_reporter,
                         MicroAllocator* allocator, const Model* model);

  // Functions that will be assigned to the corresponding function pointers
  // on TfLiteContext:
  static void* AllocatePersistentBuffer(TfLiteContext* ctx, size_t bytes);
  static TfLiteStatus RequestScratchBufferInArena(TfLiteContext* ctx,
                                                  size_t bytes,
                                                  int* buffer_idx);
  static void* GetScratchBuffer(TfLiteContext* ctx, int buffer_idx);
  static void ReportOpError(struct TfLiteContext* context, const char* format,
                            ...);
  static TfLiteTensor* GetTensor(const struct TfLiteContext* context,
                                 int tensor_idx);
  static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context,
                                         int tensor_idx);

  // Sets the pointer to a list of TfLiteEvalTensor instances.
  void SetTfLiteEvalTensors(TfLiteEvalTensor* eval_tensors);

  // Sets the pointer to a list of ScratchBufferHandle instances.
  void SetScratchBufferHandles(ScratchBufferHandle* scratch_buffer_handles);

 private:
  MicroAllocator* allocator_ = nullptr;
  ErrorReporter* error_reporter_ = nullptr;
  const Model* model_ = nullptr;
  TfLiteEvalTensor* eval_tensors_ = nullptr;
  ScratchBufferHandle* scratch_buffer_handles_ = nullptr;
};

}  // namespace internal
|
class MicroInterpreter {
 public:
  // The lifetime of the model, op resolver, tensor arena, error reporter and
  // profiler must be at least as long as that of the interpreter object,
  // since the interpreter may need to access them at any time. The
  // interpreter does not take ownership of any of the pointed-to objects;
  // ownership remains with the caller.
  MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver,
                   uint8_t* tensor_arena, size_t tensor_arena_size,
                   ErrorReporter* error_reporter,
                   MicroProfiler* profiler = nullptr);

  // Creates an interpreter instance using an existing MicroAllocator
  // instance. Use this constructor when the allocator needs to be shared
  // across more than one interpreter, or when recording allocations. The
  // lifetime of the allocator must be at least as long as that of the
  // interpreter object.
  MicroInterpreter(const Model* model, const MicroOpResolver& op_resolver,
                   MicroAllocator* allocator, ErrorReporter* error_reporter,
                   MicroProfiler* profiler = nullptr);

  ~MicroInterpreter();
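
  // A minimal end-to-end sketch (illustrative only: `g_model_data`, the op
  // count, and the arena size are placeholders, and MicroMutableOpResolver /
  // MicroErrorReporter come from their own headers, which this file does not
  // include):
  //
  //   const tflite::Model* model = tflite::GetModel(g_model_data);
  //   tflite::MicroMutableOpResolver<4> resolver;  // Add*() the needed ops.
  //   static uint8_t arena[16 * 1024];
  //   tflite::MicroErrorReporter reporter;
  //   tflite::MicroInterpreter interpreter(model, resolver, arena,
  //                                        sizeof(arena), &reporter);
  //   if (interpreter.AllocateTensors() != kTfLiteOk) { /* handle error */ }
  //   float* in = interpreter.typed_input_tensor<float>(0);
  //   // ...fill the input...
  //   if (interpreter.Invoke() != kTfLiteOk) { /* handle error */ }
  //   float* out = interpreter.typed_output_tensor<float>(0);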

  // Runs through the model and allocates all necessary input, output and
  // intermediate tensors. The run_all_prep_ops flag is not part of upstream
  // TFLM's AllocateTensors(); judging by its name, it forces the prepare
  // stage of every op to run during allocation.
  TfLiteStatus AllocateTensors(bool run_all_prep_ops = false);

  // Runs inference over the model. AllocateTensors() must be called (and
  // must succeed) before calling Invoke().
  TfLiteStatus Invoke();

  size_t tensors_size() const { return context_.tensors_size; }

  // Returns a pointer to the tensor at the given index, or nullptr on
  // failure.
  TfLiteTensor* tensor(size_t tensor_index);

  // Returns the tensor's data cast to T, or nullptr if the index is invalid
  // or the tensor's type does not match T.
  template <class T>
  T* typed_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }

  // Accessors for the model's input tensors.
  TfLiteTensor* input(size_t index);
  size_t inputs_size() const { return subgraph_->inputs()->Length(); }
  const flatbuffers::Vector<int32_t>& inputs() const {
    return *subgraph_->inputs();
  }
  TfLiteTensor* input_tensor(size_t index) { return input(index); }

  // Returns the input tensor's data cast to T, or nullptr if the index is
  // invalid or the tensor's type does not match T.
  template <class T>
  T* typed_input_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = input_tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }

  // Accessors for the model's output tensors.
  TfLiteTensor* output(size_t index);
  size_t outputs_size() const { return subgraph_->outputs()->Length(); }
  const flatbuffers::Vector<int32_t>& outputs() const {
    return *subgraph_->outputs();
  }
  TfLiteTensor* output_tensor(size_t index) { return output(index); }

  // Returns the output tensor's data cast to T, or nullptr if the index is
  // invalid or the tensor's type does not match T.
  template <class T>
  T* typed_output_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = output_tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return GetTensorData<T>(tensor_ptr);
      }
    }
    return nullptr;
  }
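
  // The typed accessors are type-checked: only the instantiation whose T
  // matches the tensor's actual type returns a non-null pointer. A sketch,
  // assuming a model whose first output is int8-quantized:
  //
  //   int8_t* q = interpreter.typed_output_tensor<int8_t>(0);  // non-null
  //   float* f = interpreter.typed_output_tensor<float>(0);    // nullptr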

  // Resets all variable tensors to their default values.
  TfLiteStatus ResetVariableTensors();

  TfLiteStatus initialization_status() const { return initialization_status_; }

  size_t operators_size() const { return subgraph_->operators()->size(); }

  // For debugging only.
  const NodeAndRegistration node_and_registration(int node_index) const {
    return node_and_registrations_[node_index];
  }

  // For debugging only. Returns the actual number of arena bytes used, which
  // is the optimal arena size; only meaningful after AllocateTensors() has
  // been called. Note that the tensor arena normally needs 16-byte alignment
  // to make full use of the space; if it is not aligned, the optimal size
  // will be larger than used_bytes().
  size_t arena_used_bytes() const { return allocator_.used_bytes(); }
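
  // One way to right-size the arena (a sketch; the 32 KB starting figure is
  // arbitrary):
  //
  //   static uint8_t arena[32 * 1024];  // deliberately generous
  //   // ...construct the interpreter, call AllocateTensors()...
  //   printf("arena needed: %u bytes\n",
  //          (unsigned)interpreter.arena_used_bytes());
  //   // Then shrink the arena to roughly that value plus some headroom.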

 protected:
  const MicroAllocator& allocator() const { return allocator_; }
  const TfLiteContext& context() const { return context_; }

 private:
  // Initialization shared by both constructors.
  void Init(MicroProfiler* profiler);

  NodeAndRegistration* node_and_registrations_ = nullptr;

  const Model* model_;
  const MicroOpResolver& op_resolver_;
  ErrorReporter* error_reporter_;
  TfLiteContext context_ = {};
  MicroAllocator& allocator_;
  bool tensors_allocated_;

  TfLiteStatus initialization_status_;

  const SubGraph* subgraph_ = nullptr;
  TfLiteEvalTensor* eval_tensors_ = nullptr;
  ScratchBufferHandle* scratch_buffer_handles_ = nullptr;

  // Implementations of the TfLiteContext callbacks are routed through this
  // helper.
  internal::ContextHelper context_helper_;

  TfLiteTensor** input_tensors_;
  TfLiteTensor** output_tensors_;
};

}  // namespace tflite

#endif  // TENSORFLOW_LITE_MICRO_MICRO_INTERPRETER_H_