code stringlengths 1 1.05M | repo_name stringlengths 6 83 | path stringlengths 3 242 | language stringclasses 222
values | license stringclasses 20
values | size int64 1 1.05M |
|---|---|---|---|---|---|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h"
// Emits a C snippet declaring the static estimate buffer that backs a
// memmapped NoiseReductionState (one uint32_t slot per channel).
void NoiseReductionWriteMemmapPreamble(
    FILE* fp, const struct NoiseReductionState* state) {
  // num_channels is printed with %d elsewhere in this file, i.e. it is not a
  // size_t; passing it to a %zu conversion unconverted is undefined behavior.
  // The explicit cast guarantees the argument type matches the specifier.
  fprintf(fp, "static uint32_t noise_reduction_estimate[%zu];\n",
          (size_t)state->num_channels);
  fprintf(fp, "\n");
}
// Emits C assignments wiring `variable` (a NoiseReductionState*) to the
// buffer declared by the preamble and to the state's scalar fields.
// The smoothing fields here are fixed-point integers (they were scaled by
// 1 << kNoiseReductionBits in NoiseReductionPopulateState), so %d is the
// appropriate conversion.
void NoiseReductionWriteMemmap(FILE* fp,
                               const struct NoiseReductionState* state,
                               const char* variable) {
  fprintf(fp, "%s->even_smoothing = %d;\n", variable, state->even_smoothing);
  fprintf(fp, "%s->odd_smoothing = %d;\n", variable, state->odd_smoothing);
  fprintf(fp, "%s->min_signal_remaining = %d;\n", variable,
          state->min_signal_remaining);
  fprintf(fp, "%s->num_channels = %d;\n", variable, state->num_channels);
  // Points the state at the static buffer emitted by the preamble.
  fprintf(fp, "%s->estimate = noise_reduction_estimate;\n", variable);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.c | C | apache-2.0 | 1,538 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
#ifdef __cplusplus
extern "C" {
#endif
// Writes a C declaration for the static estimate buffer that backs a
// memmapped NoiseReductionState.
void NoiseReductionWriteMemmapPreamble(FILE* fp,
                                       const struct NoiseReductionState* state);
// Writes C assignments that wire `variable` (a NoiseReductionState*) up to
// that buffer and to the state's scalar fields.
void NoiseReductionWriteMemmap(FILE* fp,
                               const struct NoiseReductionState* state,
                               const char* variable);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_io.h | C | apache-2.0 | 1,383 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {

const int kNumChannels = 2;  // channel count used by every test below

// Test noise reduction using default config values.
class NoiseReductionTestConfig {
 public:
  NoiseReductionTestConfig() {
    // These mirror NoiseReductionFillConfigWithDefaults().
    config_.smoothing_bits = 10;
    config_.even_smoothing = 0.025;
    config_.odd_smoothing = 0.06;
    config_.min_signal_remaining = 0.05;
  }
  struct NoiseReductionConfig config_;
};

}  // namespace
TF_LITE_MICRO_TESTS_BEGIN
// One NoiseReductionApply() call on a known two-channel input must produce
// these exact per-channel noise estimates.
TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReductionEstimate) {
  NoiseReductionTestConfig config;
  struct NoiseReductionState state;
  TF_LITE_MICRO_EXPECT(
      NoiseReductionPopulateState(&config.config_, &state, kNumChannels));
  uint32_t signal[] = {247311, 508620};
  NoiseReductionApply(&state, signal);
  const uint32_t expected[] = {6321887, 31248341};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.estimate[i], expected[i]);
  }
  NoiseReductionFreeStateContents(&state);
}
// NoiseReductionApply() reduces the signal in place; check the reduced
// per-channel values (not the estimates) for the same known input.
TF_LITE_MICRO_TEST(NoiseReductionTest_TestNoiseReduction) {
  NoiseReductionTestConfig config;
  struct NoiseReductionState state;
  TF_LITE_MICRO_EXPECT(
      NoiseReductionPopulateState(&config.config_, &state, kNumChannels));
  uint32_t signal[] = {247311, 508620};
  NoiseReductionApply(&state, signal);
  const uint32_t expected[] = {241137, 478104};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
  }
  NoiseReductionFreeStateContents(&state);
}
TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_test.cc | C++ | apache-2.0 | 2,589 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h"
#include <stdio.h>
void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config) {
config->smoothing_bits = 10;
config->even_smoothing = 0.025;
config->odd_smoothing = 0.06;
config->min_signal_remaining = 0.05;
}
// Converts the float config into the state's fixed-point fields and
// allocates the per-channel estimate buffer.  Returns 1 on success, 0 if
// the allocation fails.  The caller later releases the buffer with
// NoiseReductionFreeStateContents().
int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
                                struct NoiseReductionState* state,
                                int num_channels) {
  state->smoothing_bits = config->smoothing_bits;
  // Scale the float smoothing coefficients into fixed point with
  // kNoiseReductionBits fractional bits (kNoiseReductionBits comes from
  // noise_reduction.h).
  state->odd_smoothing = config->odd_smoothing * (1 << kNoiseReductionBits);
  state->even_smoothing = config->even_smoothing * (1 << kNoiseReductionBits);
  state->min_signal_remaining =
      config->min_signal_remaining * (1 << kNoiseReductionBits);
  state->num_channels = num_channels;
  // calloc zero-initializes, so every channel starts with a noise
  // estimate of 0.
  state->estimate = calloc(state->num_channels, sizeof(*state->estimate));
  if (state->estimate == NULL) {
    fprintf(stderr, "Failed to alloc estimate buffer\n");
    return 0;
  }
  return 1;
}
// Releases the estimate buffer allocated by NoiseReductionPopulateState().
// The pointer is cleared afterwards so an accidental double call (or a
// stale read through the state) fails safely: free(NULL) is a no-op.
void NoiseReductionFreeStateContents(struct NoiseReductionState* state) {
  free(state->estimate);
  state->estimate = NULL;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.c | C | apache-2.0 | 1,826 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
#include "tensorflow/lite/experimental/microfrontend/lib/noise_reduction.h"
#ifdef __cplusplus
extern "C" {
#endif
// Tunable parameters for the noise-reduction stage; see
// NoiseReductionFillConfigWithDefaults() for typical values.
struct NoiseReductionConfig {
  // scale the signal up by 2^(smoothing_bits) before reduction
  int smoothing_bits;
  // smoothing coefficient for even-numbered channels
  float even_smoothing;
  // smoothing coefficient for odd-numbered channels
  float odd_smoothing;
  // fraction of signal to preserve (1.0 disables this module)
  float min_signal_remaining;
};
// Populates the NoiseReductionConfig with "sane" default values.
void NoiseReductionFillConfigWithDefaults(struct NoiseReductionConfig* config);
// Allocates any buffers.
int NoiseReductionPopulateState(const struct NoiseReductionConfig* config,
struct NoiseReductionState* state,
int num_channels);
// Frees any allocated buffers.
void NoiseReductionFreeStateContents(struct NoiseReductionState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_NOISE_REDUCTION_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/noise_reduction_util.h | C | apache-2.0 | 1,911 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
#include "tensorflow/lite/experimental/microfrontend/lib/bits.h"
// Evaluates the gain curve at x using the piecewise-quadratic lookup table
// built by PcanGainControlPopulateState().  Entries 0..2 hold exact values
// for x = 0, 1, 2; larger inputs are interpolated within the power-of-two
// interval that contains x.
int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut) {
  if (x <= 2) {
    // Tiny inputs are stored directly at the front of the table.
    return lut[x];
  }
  // interval identifies which power-of-two segment x falls in
  // (presumably the 1-based index of x's highest set bit -- see bits.h).
  const int16_t interval = MostSignificantBit32(x);
  // Each interval >= 2 owns a 4-entry record {y0, a1, a2, unused}; the
  // table was built with a matching -6 offset so record i starts at
  // lut[4 * i] after this adjustment.
  lut += 4 * interval - 6;
  // frac = x's 10-bit fractional position within its interval.
  const int16_t frac =
      ((interval < 11) ? (x << (11 - interval)) : (x >> (interval - 11))) &
      0x3FF;
  // Fixed-point quadratic: result = y0 + ((a1 << 5) + a2*frac/32) * frac,
  // rounded and rescaled by 2^15.
  int32_t result = ((int32_t)lut[2] * frac) >> 5;
  result += (int32_t)((uint32_t)lut[1] << 5);
  result *= frac;
  result = (result + (1 << 14)) >> 15;  // round to nearest
  result += lut[0];
  return (int16_t)result;
}
// Compresses a kPcanSnrBits-scaled SNR value down to kPcanOutputBits bits:
// inputs below the knee pass through a squared (quadratic) segment, inputs
// at or above it through a shifted-and-offset linear segment.
uint32_t PcanShrink(const uint32_t x) {
  const uint32_t knee = 2 << kPcanSnrBits;
  if (x >= knee) {
    return (x >> (kPcanSnrBits - kPcanOutputBits)) - (1 << kPcanOutputBits);
  }
  return (x * x) >> (2 + 2 * kPcanSnrBits - kPcanOutputBits);
}
// Normalizes each channel in place: look up a gain from the channel's
// noise estimate, scale the sample by it (64-bit intermediate to avoid
// overflow), shift into SNR units, then compress to the output bit width.
void PcanGainControlApply(struct PcanGainControlState* state,
                          uint32_t* signal) {
  for (int channel = 0; channel < state->num_channels; ++channel) {
    const uint32_t gain =
        WideDynamicFunction(state->noise_estimate[channel], state->gain_lut);
    const uint64_t scaled = (uint64_t)signal[channel] * gain;
    signal[channel] = PcanShrink((uint32_t)(scaled >> state->snr_shift));
  }
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.c | C | apache-2.0 | 1,924 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
#include <stdint.h>
#include <stdlib.h>
#define kPcanSnrBits 12
#define kPcanOutputBits 6
#ifdef __cplusplus
extern "C" {
#endif
// Details at https://research.google/pubs/pub45911.pdf
struct PcanGainControlState {
  int enable_pcan;           // nonzero when this stage is active
  uint32_t* noise_estimate;  // per-channel noise estimates (caller-owned)
  int num_channels;          // entries in noise_estimate / signal
  int16_t* gain_lut;         // gain lookup table (owned; malloc'd at populate)
  int32_t snr_shift;         // right shift applied to signal * gain
};
// Evaluates the gain curve at x via the piecewise-quadratic table `lut`.
int16_t WideDynamicFunction(const uint32_t x, const int16_t* lut);
// Compresses an SNR value down to kPcanOutputBits bits.
uint32_t PcanShrink(const uint32_t x);
// Applies the gain control to `signal` in place, one entry per channel.
void PcanGainControlApply(struct PcanGainControlState* state, uint32_t* signal);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h | C | apache-2.0 | 1,466 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {

const int kNumChannels = 2;     // channel count used by the test
const int kSmoothingBits = 10;  // smoothing_bits passed to PopulateState
const int kCorrectionBits = -1; // input_correction_bits passed to PopulateState

// Test pcan auto gain control using default config values.
class PcanGainControlTestConfig {
 public:
  PcanGainControlTestConfig() {
    // Defaults per PcanGainControlFillConfigWithDefaults(), but with the
    // stage enabled so the test exercises the full path.
    config_.enable_pcan = 1;
    config_.strength = 0.95;
    config_.offset = 80.0;
    config_.gain_bits = 21;
  }
  struct PcanGainControlConfig config_;
};

}  // namespace
TF_LITE_MICRO_TESTS_BEGIN
// Runs known noise estimates and signal values through the gain control and
// checks the gained-and-shrunk per-channel outputs exactly.
TF_LITE_MICRO_TEST(PcanGainControlTest_TestPcanGainControl) {
  uint32_t estimate[] = {6321887, 31248341};
  PcanGainControlTestConfig config;
  struct PcanGainControlState state;
  TF_LITE_MICRO_EXPECT(PcanGainControlPopulateState(
      &config.config_, &state, estimate, kNumChannels, kSmoothingBits,
      kCorrectionBits));
  uint32_t signal[] = {241137, 478104};
  PcanGainControlApply(&state, signal);
  const uint32_t expected[] = {3578, 1533};
  TF_LITE_MICRO_EXPECT_EQ(state.num_channels,
                          sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.num_channels; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(signal[i], expected[i]);
  }
  PcanGainControlFreeStateContents(&state);
}
TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_test.cc | C++ | apache-2.0 | 2,083 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h"
#include <math.h>
#include <stdio.h>
#define kint16max 0x00007FFF
void PcanGainControlFillConfigWithDefaults(
struct PcanGainControlConfig* config) {
config->enable_pcan = 0;
config->strength = 0.95;
config->offset = 80.0;
config->gain_bits = 21;
}
// Computes one point of the gain curve:
//   gain = 2^gain_bits * (x / 2^input_bits + offset)^(-strength)
// saturated to the int16 range and rounded to nearest.
int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
                               int32_t input_bits, uint32_t x) {
  // Undo the input's fixed-point scaling before applying the power law.
  const float x_as_float = ((float)x) / ((uint32_t)1 << input_bits);
  const float gain_as_float =
      ((uint32_t)1 << config->gain_bits) *
      powf(x_as_float + config->offset, -config->strength);
  if (gain_as_float > kint16max) {
    return kint16max;  // saturate rather than overflow the int16_t cast
  }
  return (int16_t)(gain_as_float + 0.5f);  // round half up
}
// Builds the piecewise-quadratic gain lookup table consumed by
// WideDynamicFunction().  Returns 1 on success, 0 on allocation failure.
// NOTE: when config->enable_pcan is 0 this returns early and leaves every
// field except enable_pcan unassigned.
int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
                                 struct PcanGainControlState* state,
                                 uint32_t* noise_estimate,
                                 const int num_channels,
                                 const uint16_t smoothing_bits,
                                 const int32_t input_correction_bits) {
  state->enable_pcan = config->enable_pcan;
  if (!state->enable_pcan) {
    return 1;
  }
  // The estimate buffer is borrowed from the caller, not copied.
  state->noise_estimate = noise_estimate;
  state->num_channels = num_channels;
  state->gain_lut = malloc(kWideDynamicFunctionLUTSize * sizeof(int16_t));
  if (state->gain_lut == NULL) {
    fprintf(stderr, "Failed to allocate gain LUT\n");
    return 0;
  }
  state->snr_shift = config->gain_bits - input_correction_bits - kPcanSnrBits;
  const int32_t input_bits = smoothing_bits - input_correction_bits;
  // Entries 0 and 1 are exact values for x = 0 and x = 1.
  state->gain_lut[0] = PcanGainLookupFunction(config, input_bits, 0);
  state->gain_lut[1] = PcanGainLookupFunction(config, input_bits, 1);
  // Temporarily bias the pointer by -6 so that the 4-entry record for
  // interval i can be written at indices 4*i .. 4*i+2 (interval 2 then
  // starts right after the two exact entries).  WideDynamicFunction()
  // applies the same -6 offset when reading.
  state->gain_lut -= 6;
  int interval;
  for (interval = 2; interval <= kWideDynamicFunctionBits; ++interval) {
    // Sample the curve at the interval start, its 3/4 point, and its end
    // (clamped for the final interval so x2 stays within uint32 range).
    const uint32_t x0 = (uint32_t)1 << (interval - 1);
    const uint32_t x1 = x0 + (x0 >> 1);
    const uint32_t x2 =
        (interval == kWideDynamicFunctionBits) ? x0 + (x0 - 1) : 2 * x0;
    const int16_t y0 = PcanGainLookupFunction(config, input_bits, x0);
    const int16_t y1 = PcanGainLookupFunction(config, input_bits, x1);
    const int16_t y2 = PcanGainLookupFunction(config, input_bits, x2);
    // Fit quadratic coefficients through the three samples.
    const int32_t diff1 = (int32_t)y1 - y0;
    const int32_t diff2 = (int32_t)y2 - y0;
    const int32_t a1 = 4 * diff1 - diff2;
    const int32_t a2 = diff2 - a1;
    state->gain_lut[4 * interval] = y0;
    state->gain_lut[4 * interval + 1] = (int16_t)a1;
    state->gain_lut[4 * interval + 2] = (int16_t)a2;
  }
  // Restore the pointer so free() later receives the malloc'd address.
  state->gain_lut += 6;
  return 1;
}
// Frees the gain LUT allocated by PcanGainControlPopulateState().
// When PCAN was disabled, PopulateState returned before ever assigning
// gain_lut, so unconditionally freeing it would pass an indeterminate
// pointer to free() -- guard on enable_pcan, which is always assigned.
void PcanGainControlFreeStateContents(struct PcanGainControlState* state) {
  if (!state->enable_pcan) {
    return;
  }
  free(state->gain_lut);
  state->gain_lut = NULL;  // make a repeated free or stale read harmless
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.c | C | apache-2.0 | 3,482 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
#include "tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control.h"
#define kWideDynamicFunctionBits 32
#define kWideDynamicFunctionLUTSize (4 * kWideDynamicFunctionBits - 3)
#ifdef __cplusplus
extern "C" {
#endif
struct PcanGainControlConfig {
// set to false (0) to disable this module
int enable_pcan;
// gain normalization exponent (0.0 disables, 1.0 full strength)
float strength;
// positive value added in the normalization denominator
float offset;
// number of fractional bits in the gain
int gain_bits;
};
void PcanGainControlFillConfigWithDefaults(
struct PcanGainControlConfig* config);
int16_t PcanGainLookupFunction(const struct PcanGainControlConfig* config,
int32_t input_bits, uint32_t x);
int PcanGainControlPopulateState(const struct PcanGainControlConfig* config,
struct PcanGainControlState* state,
uint32_t* noise_estimate,
const int num_channels,
const uint16_t smoothing_bits,
const int32_t input_correction_bits);
void PcanGainControlFreeStateContents(struct PcanGainControlState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_PCAN_GAIN_CONTROL_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/pcan_gain_control_util.h | C | apache-2.0 | 2,208 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
#include <string.h>
// Buffers incoming samples and, once a full window's worth is available,
// multiplies the coefficients into the input to fill state->output.
// Returns 1 when state->output holds a fresh frame (the input buffer is
// then shifted down by state->step for the next frame), 0 when more
// samples are needed.  *num_samples_read reports how many of `samples`
// were consumed by this call.
int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
                         size_t num_samples, size_t* num_samples_read) {
  const int size = state->size;
  // Copy samples from the samples buffer over to our local input.
  size_t max_samples_to_copy = state->size - state->input_used;
  if (max_samples_to_copy > num_samples) {
    max_samples_to_copy = num_samples;
  }
  memcpy(state->input + state->input_used, samples,
         max_samples_to_copy * sizeof(*samples));
  *num_samples_read = max_samples_to_copy;
  state->input_used += max_samples_to_copy;
  if (state->input_used < state->size) {
    // We don't have enough samples to compute a window.
    return 0;
  }
  // Apply the window to the input.
  const int16_t* coefficients = state->coefficients;
  const int16_t* input = state->input;
  int16_t* output = state->output;
  int i;
  int16_t max_abs_output_value = 0;
  for (i = 0; i < size; ++i) {
    // Fixed-point multiply; the product is rescaled by kFrontendWindowBits.
    int16_t new_value =
        (((int32_t)*input++) * *coefficients++) >> kFrontendWindowBits;
    *output++ = new_value;
    // Track the largest magnitude seen in this frame.
    // NOTE(review): negating INT16_MIN would not fit back in int16_t;
    // the windowed product appears to stay away from that corner, but
    // confirm against the coefficient range.
    if (new_value < 0) {
      new_value = -new_value;
    }
    if (new_value > max_abs_output_value) {
      max_abs_output_value = new_value;
    }
  }
  // Shuffle the input down by the step size, and update how much we have used.
  memmove(state->input, state->input + state->step,
          sizeof(*state->input) * (state->size - state->step));
  state->input_used -= state->step;
  state->max_abs_output_value = max_abs_output_value;
  // Indicate that the output buffer is valid for the next stage.
  return 1;
}
// Returns the window to its initial condition: zeroed input/output buffers,
// no buffered samples, and a cleared output-magnitude statistic.
void WindowReset(struct WindowState* state) {
  const size_t length = state->size;
  memset(state->input, 0, length * sizeof(*state->input));
  memset(state->output, 0, length * sizeof(*state->output));
  state->max_abs_output_value = 0;
  state->input_used = 0;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window.c | C | apache-2.0 | 2,587 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
#include <stdint.h>
#include <stdlib.h>
#define kFrontendWindowBits 12
#ifdef __cplusplus
extern "C" {
#endif
struct WindowState {
  size_t size;                   // window length in samples
  int16_t* coefficients;         // window table; scale set by kFrontendWindowBits
  size_t step;                   // samples to advance between frames
  int16_t* input;                // buffered input samples (size elements)
  size_t input_used;             // how many of `input` are currently filled
  int16_t* output;               // most recent windowed frame (size elements)
  int16_t max_abs_output_value;  // max |output[i]| of the latest frame
};
// Applies a window to the samples coming in, stepping forward at the given
// rate.
int WindowProcessSamples(struct WindowState* state, const int16_t* samples,
                         size_t num_samples, size_t* num_samples_read);
// Zeroes the buffers and resets the bookkeeping for a fresh stream.
void WindowReset(struct WindowState* state);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window.h | C | apache-2.0 | 1,476 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window_io.h"
// Emits C definitions for the static buffers backing a memmapped
// WindowState: the coefficient table (with its current contents) and the
// zero-initialized input/output arrays.
void WindowWriteMemmapPreamble(FILE* fp, const struct WindowState* state) {
  fprintf(fp, "static int16_t window_coefficients[] = {\n");
  // state->size is size_t; an int index would trigger a signed/unsigned
  // comparison (and truncate for very large windows), so use size_t.
  size_t i;
  for (i = 0; i < state->size; ++i) {
    fprintf(fp, "%d", state->coefficients[i]);
    // Separator after every element except the last; i + 1 avoids the
    // size_t underflow of `size - 1` when size == 0.
    if (i + 1 < state->size) {
      fprintf(fp, ", ");
    }
  }
  fprintf(fp, "};\n");
  fprintf(fp, "static int16_t window_input[%zu];\n", state->size);
  fprintf(fp, "static int16_t window_output[%zu];\n", state->size);
  fprintf(fp, "\n");
}
// Emits C assignments wiring `variable` (a WindowState*) to the static
// buffers produced by WindowWriteMemmapPreamble() and copying the state's
// scalar fields.
void WindowWriteMemmap(FILE* fp, const struct WindowState* state,
                       const char* variable) {
  fprintf(fp, "%s->size = %zu;\n", variable, state->size);
  fprintf(fp, "%s->coefficients = window_coefficients;\n", variable);
  fprintf(fp, "%s->step = %zu;\n", variable, state->step);
  fprintf(fp, "%s->input = window_input;\n", variable);
  fprintf(fp, "%s->input_used = %zu;\n", variable, state->input_used);
  fprintf(fp, "%s->output = window_output;\n", variable);
  // int16_t promotes to int, matching %d.
  fprintf(fp, "%s->max_abs_output_value = %d;\n", variable,
          state->max_abs_output_value);
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window_io.c | C | apache-2.0 | 1,808 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
#include <stdio.h>
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
#ifdef __cplusplus
extern "C" {
#endif
// Writes C definitions of the static buffers backing a memmapped WindowState.
void WindowWriteMemmapPreamble(FILE* fp, const struct WindowState* state);
// Writes C assignments wiring `variable` (a WindowState*) to those buffers
// and to the state's scalar fields.
void WindowWriteMemmap(FILE* fp, const struct WindowState* state,
                       const char* variable);
#ifdef __cplusplus
} // extern "C"
#endif
#endif // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_IO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window_io.h | C | apache-2.0 | 1,237 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window.h"
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
#include "tensorflow/lite/micro/testing/micro_test.h"
namespace {

const int kSampleRate = 1000;   // Hz
const int kWindowSamples = 25;  // 25 ms window at 1 kHz
const int kStepSamples = 10;    // 10 ms step at 1 kHz
// Alternating extreme samples make windowing effects easy to spot.
const int16_t kFakeAudioData[] = {
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768,
    0, 32767, 0, -32768, 0, 32767, 0, -32768, 0, 32767, 0, -32768};

// Test window function behaviors using default config values.
class WindowTestConfig {
 public:
  WindowTestConfig() {
    config_.size_ms = 25;
    config_.step_size_ms = 10;
  }
  struct WindowConfig config_;
};

}  // namespace
TF_LITE_MICRO_TESTS_BEGIN
// The generated 25-entry coefficient table must be symmetric with its peak
// 4096 == (1 << kFrontendWindowBits) at the center sample.
TF_LITE_MICRO_TEST(WindowState_CheckCoefficients) {
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  const int16_t expected[] = {16,   144,  391,  743,  1176, 1664, 2177,
                              2681, 3145, 3541, 3843, 4032, 4096, 4032,
                              3843, 3541, 3145, 2681, 2177, 1664, 1176,
                              743,  391,  144,  16};
  TF_LITE_MICRO_EXPECT_EQ(state.size, sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.coefficients[i], expected[i]);
  }
  WindowFreeStateContents(&state);
}
// After one frame is produced the samples beyond the step size must remain
// buffered at the front of state->input for the next frame.
TF_LITE_MICRO_TEST(WindowState_CheckResidualInput) {
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]), &num_samples_read));
  int i;
  for (i = kStepSamples; i < kWindowSamples; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.input[i - kStepSamples], kFakeAudioData[i]);
  }
  WindowFreeStateContents(&state);
}
// The first windowed frame of the fake audio must match these exact
// coefficient-scaled values.
TF_LITE_MICRO_TEST(WindowState_CheckOutputValues) {
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]), &num_samples_read));
  const int16_t expected[] = {
      0, 1151, 0, -5944, 0, 13311, 0, -21448, 0, 28327, 0, -32256, 0, 32255,
      0, -28328, 0, 21447, 0, -13312, 0, 5943, 0, -1152, 0};
  TF_LITE_MICRO_EXPECT_EQ(state.size, sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.output[i], expected[i]);
  }
  WindowFreeStateContents(&state);
}
// The largest |windowed sample| of the first frame must be recorded in
// max_abs_output_value (matches the -32256 entry in CheckOutputValues).
TF_LITE_MICRO_TEST(WindowState_CheckMaxAbsValue) {
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]), &num_samples_read));
  TF_LITE_MICRO_EXPECT_EQ(state.max_abs_output_value, 32256);
  WindowFreeStateContents(&state);
}
// Feeding the remaining samples after the first frame must yield the next
// frame; the expected values are the sign-flipped first frame because the
// fake audio alternates polarity.
TF_LITE_MICRO_TEST(WindowState_CheckConsecutiveWindow) {
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  size_t num_samples_read;
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]), &num_samples_read));
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData + kWindowSamples,
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]) - kWindowSamples,
      &num_samples_read));
  const int16_t expected[] = {
      0, -1152, 0, 5943, 0, -13312, 0, 21447, 0, -28328, 0, 32255, 0, -32256,
      0, 28327, 0, -21448, 0, 13311, 0, -5944, 0, 1151, 0};
  TF_LITE_MICRO_EXPECT_EQ(state.size, sizeof(expected) / sizeof(expected[0]));
  int i;
  for (i = 0; i < state.size; ++i) {
    TF_LITE_MICRO_EXPECT_EQ(state.output[i], expected[i]);
  }
  WindowFreeStateContents(&state);
}
TF_LITE_MICRO_TEST(WindowState_CheckNotEnoughSamples) {
  // A third call does not have a full step's worth of fresh samples left, so
  // it must fail while still consuming the remainder into the input buffer.
  WindowTestConfig config;
  struct WindowState state;
  TF_LITE_MICRO_EXPECT(
      WindowPopulateState(&config.config_, &state, kSampleRate));
  const size_t audio_sample_count =
      sizeof(kFakeAudioData) / sizeof(kFakeAudioData[0]);
  size_t num_samples_read;
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData, audio_sample_count, &num_samples_read));
  TF_LITE_MICRO_EXPECT(WindowProcessSamples(
      &state, kFakeAudioData + kWindowSamples,
      audio_sample_count - kWindowSamples, &num_samples_read));
  TF_LITE_MICRO_EXPECT_EQ(
      false, WindowProcessSamples(
                 &state, kFakeAudioData + kWindowSamples + kStepSamples,
                 audio_sample_count - kWindowSamples - kStepSamples,
                 &num_samples_read));
  TF_LITE_MICRO_EXPECT_EQ(state.input_used,
                          audio_sample_count - 2 * kStepSamples);
  WindowFreeStateContents(&state);
}
TF_LITE_MICRO_TESTS_END
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window_test.cc | C++ | apache-2.0 | 6,036 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/window_util.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
// Some platforms don't have M_PI
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
void WindowFillConfigWithDefaults(struct WindowConfig* config) {
  // Defaults: 25 ms analysis frames advanced every 10 ms, the values
  // commonly used for speech feature extraction.
  config->step_size_ms = 10;
  config->size_ms = 25;
}
// Allocates and initializes the window state: the fixed-point Hann window
// coefficients plus the input and output sample buffers.
// Returns 1 on success, 0 on allocation failure.
int WindowPopulateState(const struct WindowConfig* config,
                        struct WindowState* state, int sample_rate) {
  state->size = config->size_ms * sample_rate / 1000;
  state->step = config->step_size_ms * sample_rate / 1000;
  state->input_used = 0;
  // Clear all buffer pointers first so that WindowFreeStateContents() is
  // safe to call on any early-return path below (free(NULL) is a no-op).
  // Previously `input` and `output` were left uninitialized when an earlier
  // allocation failed, making cleanup undefined behavior.
  state->coefficients = NULL;
  state->input = NULL;
  state->output = NULL;

  state->coefficients = malloc(state->size * sizeof(*state->coefficients));
  if (state->coefficients == NULL) {
    fprintf(stderr, "Failed to allocate window coefficients\n");
    return 0;
  }
  // Populate the window values: a Hann window sampled at bin centers,
  // converted to fixed point with kFrontendWindowBits fractional bits.
  const float arg = M_PI * 2.0 / ((float)state->size);
  int i;
  for (i = 0; i < state->size; ++i) {
    float float_value = 0.5 - (0.5 * cos(arg * (i + 0.5)));
    // Scale it to fixed point and round it.
    state->coefficients[i] =
        floor(float_value * (1 << kFrontendWindowBits) + 0.5);
  }

  state->input = malloc(state->size * sizeof(*state->input));
  if (state->input == NULL) {
    fprintf(stderr, "Failed to allocate window input\n");
    return 0;
  }
  state->output = malloc(state->size * sizeof(*state->output));
  if (state->output == NULL) {
    fprintf(stderr, "Failed to allocate window output\n");
    return 0;
  }
  return 1;
}
// Releases all buffers allocated by WindowPopulateState(). Pointers are
// reset to NULL so a repeated call on the same state is a harmless no-op
// rather than a double-free.
void WindowFreeStateContents(struct WindowState* state) {
  free(state->coefficients);
  state->coefficients = NULL;
  free(state->input);
  state->input = NULL;
  free(state->output);
  state->output = NULL;
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window_util.c | C | apache-2.0 | 2,325 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_

#include <stddef.h>  // size_t: include directly instead of transitively.

#include "tensorflow/lite/experimental/microfrontend/lib/window.h"

#ifdef __cplusplus
extern "C" {
#endif

// Configuration for the sliding-window stage of the frontend.
struct WindowConfig {
  // length of window frame in milliseconds
  size_t size_ms;
  // length of step for next frame in milliseconds
  size_t step_size_ms;
};

// Populates the WindowConfig with "sane" default values.
void WindowFillConfigWithDefaults(struct WindowConfig* config);

// Allocates any buffers. Returns 1 on success, 0 on allocation failure.
int WindowPopulateState(const struct WindowConfig* config,
                        struct WindowState* state, int sample_rate);

// Frees any allocated buffers.
void WindowFreeStateContents(struct WindowState* state);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // TENSORFLOW_LITE_EXPERIMENTAL_MICROFRONTEND_LIB_WINDOW_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/lib/window_util.h | C | apache-2.0 | 1,565 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/microfrontend/lib/frontend.h"
#include "tensorflow/lite/experimental/microfrontend/lib/frontend_util.h"
#include "tensorflow/core/framework/op.h"
#include "tensorflow/core/framework/op_kernel.h"
#include "tensorflow/core/framework/shape_inference.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"
#include "tensorflow/core/platform/macros.h"
using tensorflow::OpKernel;
using tensorflow::OpKernelConstruction;
using tensorflow::OpKernelContext;
using tensorflow::Status;
using tensorflow::Tensor;
using tensorflow::TensorShape;
using tensorflow::TensorShapeUtils;
using tensorflow::errors::Internal;
using tensorflow::errors::InvalidArgument;
using tensorflow::shape_inference::DimensionHandle;
using tensorflow::shape_inference::InferenceContext;
using tensorflow::shape_inference::ShapeHandle;
namespace tensorflow {
// Registers the AudioMicrofrontend op, which converts a 1-D int16 audio
// buffer into a 2-D tensor of filterbank features (one row per analysis
// frame, optionally stacked with left/right context frames).
// Fix: the op doc previously described right_context as "preceding" frames
// (a copy-paste of left_context); the kernel attaches *following* frames
// (see Compute(): frame <= anchor + right_context_).
REGISTER_OP("AudioMicrofrontend")
    .Input("audio: int16")
    .Output("filterbanks: out_type")
    .Attr("sample_rate: int = 16000")
    .Attr("window_size: int = 25")
    .Attr("window_step: int = 10")
    .Attr("num_channels: int = 32")
    .Attr("upper_band_limit: float = 7500.0")
    .Attr("lower_band_limit: float = 125.0")
    .Attr("smoothing_bits: int = 10")
    .Attr("even_smoothing: float = 0.025")
    .Attr("odd_smoothing: float = 0.06")
    .Attr("min_signal_remaining: float = 0.05")
    .Attr("enable_pcan: bool = false")
    .Attr("pcan_strength: float = 0.95")
    .Attr("pcan_offset: float = 80.0")
    .Attr("gain_bits: int = 21")
    .Attr("enable_log: bool = true")
    .Attr("scale_shift: int = 6")
    .Attr("left_context: int = 0")
    .Attr("right_context: int = 0")
    .Attr("frame_stride: int = 1")
    .Attr("zero_padding: bool = false")
    .Attr("out_scale: int = 1")
    .Attr("out_type: {uint16, float} = DT_UINT16")
    .SetShapeFn([](InferenceContext* ctx) {
      ShapeHandle input;
      TF_RETURN_IF_ERROR(ctx->WithRank(ctx->input(0), 1, &input));

      int sample_rate;
      TF_RETURN_IF_ERROR(ctx->GetAttr("sample_rate", &sample_rate));
      int window_size;
      TF_RETURN_IF_ERROR(ctx->GetAttr("window_size", &window_size));
      // Window attributes are given in milliseconds; convert to samples.
      window_size *= sample_rate / 1000;
      int window_step;
      TF_RETURN_IF_ERROR(ctx->GetAttr("window_step", &window_step));
      window_step *= sample_rate / 1000;

      int num_channels;
      TF_RETURN_IF_ERROR(ctx->GetAttr("num_channels", &num_channels));
      int left_context;
      TF_RETURN_IF_ERROR(ctx->GetAttr("left_context", &left_context));
      int right_context;
      TF_RETURN_IF_ERROR(ctx->GetAttr("right_context", &right_context));
      int frame_stride;
      TF_RETURN_IF_ERROR(ctx->GetAttr("frame_stride", &frame_stride));

      // Number of whole windows that fit in the input, subsampled by
      // frame_stride; zero if the audio is shorter than one window.
      DimensionHandle num_frames = ctx->Dim(input, 0);
      if (ctx->Value(num_frames) < window_size) {
        num_frames = ctx->MakeDim(0);
      } else {
        TF_RETURN_IF_ERROR(ctx->Subtract(num_frames, window_size, &num_frames));
        TF_RETURN_IF_ERROR(
            ctx->Divide(num_frames, window_step, false, &num_frames));
        TF_RETURN_IF_ERROR(
            ctx->Divide(num_frames, frame_stride, false, &num_frames));
        TF_RETURN_IF_ERROR(ctx->Add(num_frames, 1, &num_frames));
      }

      // Each output row stacks the frame itself plus its left/right context.
      int stack_size = 1 + left_context + right_context;
      DimensionHandle num_features = ctx->MakeDim(num_channels);
      TF_RETURN_IF_ERROR(
          ctx->Multiply(num_features, stack_size, &num_features));
      ShapeHandle output = ctx->MakeShape({num_frames, num_features});
      ctx->set_output(0, output);
      return tensorflow::Status::OK();
    })
    .Doc(R"doc(
Audio Microfrontend Op.
This Op converts a sequence of audio data into one or more
feature vectors containing filterbanks of the input. The
conversion process uses a lightweight library to perform:
1. A slicing window function
2. Short-time FFTs
3. Filterbank calculations
4. Noise reduction
5. PCAN Auto Gain Control
6. Logarithmic scaling
Arguments
  audio: 1D Tensor, int16 audio data in temporal ordering.
  sample_rate: Integer, the sample rate of the audio in Hz.
  window_size: Integer, length of desired time frames in ms.
  window_step: Integer, length of step size for the next frame in ms.
  num_channels: Integer, the number of filterbank channels to use.
  upper_band_limit: Float, the highest frequency included in the filterbanks.
  lower_band_limit: Float, the lowest frequency included in the filterbanks.
  smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
  even_smoothing: Float, smoothing coefficient for even-numbered channels.
  odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
  min_signal_remaining: Float, fraction of signal to preserve in smoothing.
  enable_pcan: Bool, enable PCAN auto gain control.
  pcan_strength: Float, gain normalization exponent.
  pcan_offset: Float, positive value added in the normalization denominator.
  gain_bits: Int, number of fractional bits in the gain.
  enable_log: Bool, enable logarithmic scaling of filterbanks.
  scale_shift: Integer, scale filterbanks by 2^(scale_shift).
  left_context: Integer, number of preceding frames to attach to each frame.
  right_context: Integer, number of following frames to attach to each frame.
  frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
  zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
    zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
  out_scale: Integer, divide all filterbanks by this number.
  out_type: DType, type of the output Tensor, defaults to UINT16.
Returns
  filterbanks: 2D Tensor, each row is a time frame, each column is a channel.
)doc");
// OpKernel implementing AudioMicrofrontend. The template parameter T is the
// output element type (uint16 or float), selected by the "out_type" attr.
template <typename T>
class AudioMicrofrontendOp : public OpKernel {
 public:
  explicit AudioMicrofrontendOp(OpKernelConstruction* ctx) : OpKernel(ctx) {
    // Transfer every op attr into the microfrontend library's config struct
    // (window, filterbank, noise-reduction, PCAN and log-scale settings).
    OP_REQUIRES_OK(ctx, ctx->GetAttr("sample_rate", &sample_rate_));
    int window_size;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("window_size", &window_size));
    config_.window.size_ms = window_size;
    int window_step;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("window_step", &window_step));
    config_.window.step_size_ms = window_step;
    OP_REQUIRES_OK(
        ctx, ctx->GetAttr("num_channels", &config_.filterbank.num_channels));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("upper_band_limit",
                                     &config_.filterbank.upper_band_limit));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("lower_band_limit",
                                     &config_.filterbank.lower_band_limit));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("smoothing_bits",
                                     &config_.noise_reduction.smoothing_bits));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("even_smoothing",
                                     &config_.noise_reduction.even_smoothing));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("odd_smoothing",
                                     &config_.noise_reduction.odd_smoothing));
    OP_REQUIRES_OK(ctx,
                   ctx->GetAttr("min_signal_remaining",
                                &config_.noise_reduction.min_signal_remaining));
    bool enable_pcan;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("enable_pcan", &enable_pcan));
    config_.pcan_gain_control.enable_pcan = enable_pcan;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("pcan_strength",
                                     &config_.pcan_gain_control.strength));
    OP_REQUIRES_OK(
        ctx, ctx->GetAttr("pcan_offset", &config_.pcan_gain_control.offset));
    OP_REQUIRES_OK(
        ctx, ctx->GetAttr("gain_bits", &config_.pcan_gain_control.gain_bits));
    bool enable_log;
    OP_REQUIRES_OK(ctx, ctx->GetAttr("enable_log", &enable_log));
    config_.log_scale.enable_log = enable_log;
    OP_REQUIRES_OK(ctx,
                   ctx->GetAttr("scale_shift", &config_.log_scale.scale_shift));
    // Framing/stacking options are applied by this kernel itself rather than
    // by the microfrontend library.
    OP_REQUIRES_OK(ctx, ctx->GetAttr("left_context", &left_context_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("right_context", &right_context_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("frame_stride", &frame_stride_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("zero_padding", &zero_padding_));
    OP_REQUIRES_OK(ctx, ctx->GetAttr("out_scale", &out_scale_));
  }
  // Runs the microfrontend over the entire input vector and writes the
  // (optionally context-stacked, strided) filterbank features to output 0.
  void Compute(OpKernelContext* ctx) override {
    const Tensor* audio;
    OP_REQUIRES_OK(ctx, ctx->input("audio", &audio));
    OP_REQUIRES(ctx, TensorShapeUtils::IsVector(audio->shape()),
                InvalidArgument("audio is not a vector"));
    auto audio_data =
        reinterpret_cast<const int16_t*>(audio->tensor_data().data());
    int audio_size = audio->NumElements();
    Tensor* filterbanks = nullptr;
    // Mirrors the op's shape function: number of whole windows that fit in
    // the input, then subsampled by frame_stride.
    int window_size = config_.window.size_ms * sample_rate_ / 1000;
    int window_step = config_.window.step_size_ms * sample_rate_ / 1000;
    int num_frames = 0;
    int sampled_frames = 0;
    if (audio_size >= window_size) {
      num_frames = (audio_size - window_size) / window_step + 1;
      sampled_frames = (num_frames - 1) / frame_stride_ + 1;
    }
    TensorShape filterbanks_shape{
        sampled_frames,
        config_.filterbank.num_channels * (1 + left_context_ + right_context_)};
    OP_REQUIRES_OK(ctx,
                   ctx->allocate_output(0, filterbanks_shape, &filterbanks));
    auto filterbanks_flat = filterbanks->flat<T>();
    struct FrontendState state;
    if (!TF_PREDICT_TRUE(
            FrontendPopulateState(&config_, &state, sample_rate_))) {
      ctx->CtxFailure(__FILE__, __LINE__,
                      Internal("failed to populate frontend state"));
      FrontendFreeStateContents(&state);
      return;
    }
    // Feed the audio through the library, collecting one feature vector per
    // produced frame, scaled down by out_scale.
    std::vector<std::vector<T>> frame_buffer(num_frames);
    int frame_index = 0;
    while (audio_size > 0) {
      size_t num_samples_read;
      struct FrontendOutput output = FrontendProcessSamples(
          &state, audio_data, audio_size, &num_samples_read);
      audio_data += num_samples_read;
      audio_size -= num_samples_read;
      if (output.values != nullptr) {
        frame_buffer[frame_index].reserve(output.size);
        int i;
        for (i = 0; i < output.size; ++i) {
          frame_buffer[frame_index].push_back(static_cast<T>(output.values[i]) /
                                              out_scale_);
        }
        ++frame_index;
      }
    }
    FrontendFreeStateContents(&state);
    // Emit every frame_stride-th frame together with its left/right context.
    // Out-of-range context frames are either zero-padded or clamped to the
    // first/last available frame, depending on zero_padding_.
    int index = 0;
    std::vector<T> pad(config_.filterbank.num_channels, 0);
    int anchor;
    for (anchor = 0; anchor < frame_buffer.size(); anchor += frame_stride_) {
      int frame;
      for (frame = anchor - left_context_; frame <= anchor + right_context_;
           ++frame) {
        std::vector<T>* feature;
        if (zero_padding_ && (frame < 0 || frame >= frame_buffer.size())) {
          feature = &pad;
        } else if (frame < 0) {
          feature = &frame_buffer[0];
        } else if (frame >= frame_buffer.size()) {
          feature = &frame_buffer[frame_buffer.size() - 1];
        } else {
          feature = &frame_buffer[frame];
        }
        for (auto f : *feature) {
          filterbanks_flat(index++) = f;
        }
      }
    }
  }
 protected:
  int sample_rate_;
  struct FrontendConfig config_;
  int left_context_;
  int right_context_;
  int frame_stride_;
  bool zero_padding_;
  int out_scale_;
  TF_DISALLOW_COPY_AND_ASSIGN(AudioMicrofrontendOp);
};
// Register CPU kernels for both supported output element types.
REGISTER_KERNEL_BUILDER(Name("AudioMicrofrontend")
                            .Device(tensorflow::DEVICE_CPU)
                            .TypeConstraint<uint16>("out_type"),
                        AudioMicrofrontendOp<uint16>);
REGISTER_KERNEL_BUILDER(Name("AudioMicrofrontend")
                            .Device(tensorflow::DEVICE_CPU)
                            .TypeConstraint<float>("out_type"),
                        AudioMicrofrontendOp<float>);
} // namespace tensorflow
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/ops/audio_microfrontend_op.cc | C++ | apache-2.0 | 12,601 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for AudioMicrofrontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.experimental.microfrontend.python.ops import audio_microfrontend_op as frontend_op
from tensorflow.python.framework import ops
# Frontend parameters shared by every test: 1 kHz audio, 25 ms windows
# advanced in 10 ms steps, reduced to two filterbank channels.
SAMPLE_RATE = 1000
WINDOW_SIZE = 25
WINDOW_STEP = 10
NUM_CHANNELS = 2
UPPER_BAND_LIMIT = 450.0
LOWER_BAND_LIMIT = 8.0
SMOOTHING_BITS = 10
class AudioFeatureGenerationTest(tf.test.TestCase):
  """Golden-value tests for the audio_microfrontend op.

  Each test feeds a synthetic square-wave signal through the frontend and
  compares the resulting filterbanks against precomputed golden values.
  The duplicated op invocation is factored into _run_frontend.
  """

  def setUp(self):
    super(AudioFeatureGenerationTest, self).setUp()
    # The custom op is graph-only, so run the tests in graph mode.
    ops.disable_eager_execution()

  def _run_frontend(self, num_steps=4, **kwargs):
    """Runs the microfrontend over a synthetic square-wave signal.

    Args:
      num_steps: how many WINDOW_STEP advances of audio to generate beyond
        the first full WINDOW_SIZE frame.
      **kwargs: extra keyword arguments forwarded to audio_microfrontend
        (e.g. context/stride/padding/output-type options).

    Returns:
      The filterbanks tensor computed by the op.
    """
    audio = tf.constant(
        [0, 32767, 0, -32768] * ((WINDOW_SIZE + num_steps * WINDOW_STEP) // 4),
        tf.int16)
    return frontend_op.audio_microfrontend(
        audio,
        sample_rate=SAMPLE_RATE,
        window_size=WINDOW_SIZE,
        window_step=WINDOW_STEP,
        num_channels=NUM_CHANNELS,
        upper_band_limit=UPPER_BAND_LIMIT,
        lower_band_limit=LOWER_BAND_LIMIT,
        smoothing_bits=SMOOTHING_BITS,
        enable_pcan=True,
        **kwargs)

  def testSimple(self):
    with self.test_session():
      filterbanks = self._run_frontend()
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425], [436, 378], [410, 350], [391, 325]])

  def testSimpleFloatScaled(self):
    with self.test_session():
      filterbanks = self._run_frontend(out_scale=64, out_type=tf.float32)
      self.assertAllEqual(filterbanks.eval(),
                          [[7.484375, 6.640625], [6.8125, 5.90625],
                           [6.40625, 5.46875], [6.109375, 5.078125]])

  def testStacking(self):
    with self.test_session():
      filterbanks = self._run_frontend(right_context=1, frame_stride=2)
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425, 436, 378], [410, 350, 391, 325]])

  def testStackingWithOverlap(self):
    with self.test_session():
      filterbanks = self._run_frontend(left_context=1, right_context=1)
      self.assertAllEqual(
          self.evaluate(filterbanks),
          [[479, 425, 479, 425, 436, 378], [479, 425, 436, 378, 410, 350],
           [436, 378, 410, 350, 391, 325], [410, 350, 391, 325, 391, 325]])

  def testStackingDropFrame(self):
    with self.test_session():
      filterbanks = self._run_frontend(left_context=1, frame_stride=2)
      self.assertAllEqual(filterbanks.eval(),
                          [[479, 425, 479, 425], [436, 378, 410, 350]])

  def testZeroPadding(self):
    with self.test_session():
      # Longer signal; out-of-range left context must be zero-filled.
      filterbanks = self._run_frontend(
          num_steps=7, left_context=2, frame_stride=3, zero_padding=True)
      self.assertAllEqual(
          self.evaluate(filterbanks),
          [[0, 0, 0, 0, 479, 425], [436, 378, 410, 350, 391, 325],
           [374, 308, 362, 292, 352, 275]])
# Standard test entry point.
if __name__ == '__main__':
  tf.test.main()
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/python/kernel_tests/audio_microfrontend_op_test.py | Python | apache-2.0 | 5,906 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""AudioMicrofrontend Op creates filterbanks from audio data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.experimental.microfrontend.ops import gen_audio_microfrontend_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import load_library
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import resource_loader
from tensorflow.python.util.tf_export import tf_export
# Load the op/kernel registrations from the shared library shipped next to
# this module; importing it registers "AudioMicrofrontend" with TensorFlow.
_audio_microfrontend_op = load_library.load_op_library(
    resource_loader.get_path_to_datafile("_audio_microfrontend_op.so"))
@tf_export("lite.experimental.microfrontend.python.ops.audio_microfrontend")
def audio_microfrontend(audio,
                        sample_rate=16000,
                        window_size=25,
                        window_step=10,
                        num_channels=32,
                        upper_band_limit=7500.0,
                        lower_band_limit=125.0,
                        smoothing_bits=10,
                        even_smoothing=0.025,
                        odd_smoothing=0.06,
                        min_signal_remaining=0.05,
                        enable_pcan=True,
                        pcan_strength=0.95,
                        pcan_offset=80.0,
                        gain_bits=21,
                        enable_log=True,
                        scale_shift=6,
                        left_context=0,
                        right_context=0,
                        frame_stride=1,
                        zero_padding=False,
                        out_scale=1,
                        out_type=dtypes.uint16):
  """Audio Microfrontend Op.

  This Op converts a sequence of audio data into one or more
  feature vectors containing filterbanks of the input. The
  conversion process uses a lightweight library to perform:

  1. A slicing window function
  2. Short-time FFTs
  3. Filterbank calculations
  4. Noise reduction
  5. PCAN Auto Gain Control
  6. Logarithmic scaling

  Args:
    audio: 1D Tensor, int16 audio data in temporal ordering.
    sample_rate: Integer, the sample rate of the audio in Hz.
    window_size: Integer, length of desired time frames in ms.
    window_step: Integer, length of step size for the next frame in ms.
    num_channels: Integer, the number of filterbank channels to use.
    upper_band_limit: Float, the highest frequency included in the filterbanks.
    lower_band_limit: Float, the lowest frequency included in the filterbanks.
    smoothing_bits: Int, scale up signal by 2^(smoothing_bits) before reduction.
    even_smoothing: Float, smoothing coefficient for even-numbered channels.
    odd_smoothing: Float, smoothing coefficient for odd-numbered channels.
    min_signal_remaining: Float, fraction of signal to preserve in smoothing.
    enable_pcan: Bool, enable PCAN auto gain control.
    pcan_strength: Float, gain normalization exponent.
    pcan_offset: Float, positive value added in the normalization denominator.
    gain_bits: Int, number of fractional bits in the gain.
    enable_log: Bool, enable logarithmic scaling of filterbanks.
    scale_shift: Integer, scale filterbanks by 2^(scale_shift).
    left_context: Integer, number of preceding frames to attach to each frame.
    right_context: Integer, number of following frames to attach to each frame.
    frame_stride: Integer, M frames to skip over, where output[n] = frame[n*M].
    zero_padding: Bool, if left/right context is out-of-bounds, attach frame of
      zeroes. Otherwise, frame[0] or frame[size-1] will be copied.
    out_scale: Integer, divide all filterbanks by this number.
    out_type: DType, type of the output Tensor, defaults to UINT16.

  Returns:
    filterbanks: 2D Tensor, each row is a time frame, each column is a channel.

  Raises:
    ValueError: If the audio tensor is not explicitly a vector.
  """
  audio_shape = audio.shape
  if audio_shape.ndims is None:
    raise ValueError("Input to `AudioMicrofrontend` should have known rank.")
  if len(audio_shape) > 1:
    # The kernel expects a 1-D vector; flatten any higher-rank input.
    audio = array_ops.reshape(audio, [-1])

  # Arguments are positional and must match the generated op's signature.
  return gen_audio_microfrontend_op.audio_microfrontend(
      audio, sample_rate, window_size, window_step, num_channels,
      upper_band_limit, lower_band_limit, smoothing_bits, even_smoothing,
      odd_smoothing, min_signal_remaining, enable_pcan, pcan_strength,
      pcan_offset, gain_bits, enable_log, scale_shift, left_context,
      right_context, frame_stride, zero_padding, out_scale, out_type)
ops.NotDifferentiable("AudioMicrofrontend")
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/microfrontend/python/ops/audio_microfrontend_op.py | Python | apache-2.0 | 5,330 |
load(
":build_def.bzl",
"gen_zip_test",
"generated_test_models_all",
)
package(
    default_visibility = [
        "//visibility:public",
    ],
    licenses = ["notice"],
)

# Python helper that drives the MLIR converter (tf_tfl_translate) when
# generating example zip files.
py_library(
    name = "mlir_convert",
    srcs = ["mlir_convert.py"],
    data = [
        "//tensorflow/compiler/mlir/lite:tf_tfl_translate",
    ],
    srcs_version = "PY3",
    deps = [
        "//tensorflow:tensorflow_py",
        "//tensorflow/lite/python:test_util",
        "//tensorflow/lite/testing:_pywrap_string_util",
        "//tensorflow/lite/testing:generate_examples_lib",
        "//tensorflow/lite/testing:generate_examples_report",
        "//tensorflow/python:graph_util",
        "//third_party/py/numpy",
        "@six_archive//:six",
    ],
)

# One zipped-example test target per (model, conversion mode) pair, as
# enumerated by generated_test_models_all() in build_def.bzl.
[gen_zip_test(
    name = "zip_test_%s" % test_name,
    size = "medium",
    srcs = ["generated_examples_zip_test.cc"],
    args = args + select({
        "//tensorflow:android": [],
        "//conditions:default": [
            "--zip_file_path=$(location :zip_%s)" % test_name,
            # TODO(angerson) We may be able to add an external unzip binary instead
            # of relying on an existing one for OSS builds.
            #"--unzip_binary_path=$(location //third_party/unzip)",
        ],
    }),
    conversion_mode = conversion_mode,
    # copybara:uncomment_begin(no special handling for Android in OSS)
    # data = select({
    #     "//tensorflow:android": [],
    #     "//conditions:default": [
    #         ":zip_%s" % test_name,
    #         "//third_party/unzip",
    #     ],
    # }),
    # copybara:uncomment_end_and_comment_begin
    data = [":zip_%s" % test_name],
    # copybara:comment_end
    shard_count = 20,
    tags = tags + [
        "gen_zip_test",
        "tflite_not_portable_intentional",
    ],
    test_name = test_name,
    deps = [
        "//tensorflow/lite/testing:parse_testdata_lib",
        "//tensorflow/lite/testing:tflite_driver",
        "//tensorflow/lite/testing:util",
        "@com_google_googletest//:gtest",
        "@com_googlesource_code_re2//:re2",
        "//tensorflow/lite:builtin_op_data",
        "//tensorflow/lite:framework",
        "//tensorflow/lite/kernels:builtin_ops",
    ] + select({
        "//conditions:default": [
            "//tensorflow/core:framework_internal",
            "//tensorflow/core:lib",
            "//tensorflow/core:test",
        ],
        "//tensorflow:android": [
            "//tensorflow/core:portable_tensorflow_lib",
            "//tensorflow/core:portable_tensorflow_test_lib",
        ],
    }),
) for conversion_mode, test_name, tags, args in generated_test_models_all()]

# Per-op test-case generators used by generate_examples.
py_library(
    name = "op_tests",
    srcs = glob(["op_tests/*.py"]),
    srcs_version = "PY3",
    deps = [
        "//third_party/py/numpy",
        "//tensorflow:tensorflow_py",
        # copybara:uncomment_begin(b/186563810)
        # "//third_party/py/tensorflow_addons",
        # copybara:uncomment_end
        "//tensorflow/lite/testing:zip_test_utils",
    ],
)

# CLI entry point that produces the example zip files consumed above.
py_binary(
    name = "generate_examples",
    srcs = ["generate_examples.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":mlir_convert",
        ":op_tests",
        "//tensorflow:tensorflow_py",
        "//tensorflow/lite/testing:generate_examples_lib",
        "//tensorflow/lite/testing:zip_test_utils",
        "//third_party/py/numpy",
        "@six_archive//:six",
    ],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/BUILD | Starlark | apache-2.0 | 3,404 |
load(
"//tensorflow:tensorflow.bzl",
"tf_cc_test",
)
load(
"//tensorflow/lite:build_def.bzl",
"generated_test_models",
)
# This is forked from `tensorflow/lite/build_def.bzl`.
# TODO(b/136499575): Merge this back to TFLite codebase when open sourcing.
def mlir_generated_test_denylisted_models():
    """Returns models excluded from MLIR-converter generated tests."""

    # TODO(b/150647400): This test passes in TF2 with tf.compat.v1 but
    # fails in TF1 with tf.compat.v1. Due to the testing environments
    # changing on 3/3, this will only be disabled temporarily.
    denylist = [
        "unidirectional_sequence_lstm",
        "unidirectional_sequence_rnn",
    ]
    return denylist
# Test cases which only work with MLIR-based conversion now.
def mlir_only_generated_test_models():
    """Returns test models that only convert successfully via MLIR."""
    return [
        "batchmatmul",
        "broadcast_to",
        "broadcast_gradient_args",
        "cond",
        "complex_abs",
        "control_dep",
        "conv_bias_relu6",
        "conv3d",
        "cumsum",
        # TODO(b/186563810): Enable after resolving tensorflow_addons dep issue
        # that causes test failures in the exported codebase.
        # copybara:uncomment_begin
        # "dense_image_warp",
        # copybara:uncomment_end
        "dynamic_rnn",
        "einsum",
        "identify_dilated_conv",
        "identify_dilated_conv1d",
        "imag",
        "irfft2d",
        "is_finite",
        "max_pool_with_argmax",
        "parse_example",
        "real",
        "reciprocal",
        "reduce_all",
        "rfft",
        "rfft2d",
        "segment_sum",
        "shape_to_strided_slice",
        "softplus",
        "static_hashtable",
        "static_rnn_with_control_flow_v2",
        "stft",
        "tensor_list_concat",
        "tensor_list_get_item",
        "tensor_list_length",
        "tensor_list_resize",
        "tensor_list_set_item",
        "tensor_list_dynamic_shape",
        "where_v2",
        "while",
    ]
# Test cases which only work internally now.
def no_oss_generated_test_models():
    """Returns test models that currently only work internally (tagged no_oss)."""
    return [
        "cond",
        "equal",
        "fill",
        "gather",
        "gather_nd",
        "not_equal",
        "parse_example",
        "slice",
        "sparse_to_dense",
        "squeeze",
        "static_hashtable",
        "strided_slice",
        "tile",
        "while",
    ]
# List of models that fail generated tests for the conversion mode.
# If you have to disable a test, please add here with a link to the appropriate
# bug or issue.
def generated_test_models_failing(conversion_mode):
    """Returns models whose generated tests fail for `conversion_mode` (currently none)."""
    return []
def mlir_generated_test_models():
    """Returns a list of models to be tested with MLIR-based conversion."""
    denylisted = mlir_generated_test_denylisted_models()
    candidates = generated_test_models() + mlir_only_generated_test_models()
    return [model for model in candidates if model not in denylisted]
def generated_test_conversion_modes():
    """Returns a list of conversion modes.

    "" is the default converter path; "forward-compat" pins the TF forward
    compatibility horizon; "mlir-quant" enables the MLIR quantizer.
    """
    modes = ["forward-compat", "", "mlir-quant"]
    return modes
def generated_test_models_all():
    """Generates a list of all tests with the different converters.

    Returns:
      List of tuples representing:
      (conversion mode, name of test, test tags, test args).
    """
    no_oss_tests = no_oss_generated_test_models()
    options = []
    for conversion_mode in generated_test_conversion_modes():
        failing_tests = generated_test_models_failing(conversion_mode)
        for model in mlir_generated_test_models():
            tags = []
            # TODO(b/187992093): Exclude tests that are failing in OSS for now.
            if model in no_oss_tests:
                tags.append("no_oss")
            # Forward-compat coverage testing is largely redundant, and
            # contributes to coverage test bloat.
            if conversion_mode == "forward-compat":
                tags.append("nozapfhahn")
            if model in failing_tests:
                tags += ["notap", "manual"]
            # Non-default modes get a per-mode suffix on the test name.
            full_name = model + ("_%s" % conversion_mode if conversion_mode else "")
            options.append((conversion_mode, full_name, tags, []))
    return options
def gen_zip_test(name, test_name, conversion_mode, **kwargs):
    """Generate a zipped-example test and its dependent zip files.

    Args:
      name: str. Resulting cc_test target name
      test_name: str. Test targets this model. Comes from the list above.
      conversion_mode: str. Which conversion mode to run with. Comes from the
        list above.
      **kwargs: tf_cc_test kwargs
    """
    # Each non-default conversion mode maps to one extra generator flag; the
    # default mode ("") adds nothing.
    mode_to_flags = {
        "forward-compat": " --make_forward_compat_test",
        "mlir-quant": " --mlir_quantizer",
    }
    gen_zipped_test_file(
        name = "zip_%s" % test_name,
        file = "%s.zip" % test_name,
        flags = mode_to_flags.get(conversion_mode, ""),
    )
    tf_cc_test(name, **kwargs)
def gen_zipped_test_file(name, file, flags = ""):
    """Generate a zip file of tests by using :generate_examples.

    Args:
      name: str. Name of output. We will produce "`file`.files" as a target.
      file: str. The name of one of the generated_examples targets, e.g. "transpose"
      flags: str. Any additional flags to include
    """
    # Runs the :generate_examples binary to write `file` (a zip of test
    # cases) into the genrule output directory ($(@D)).
    native.genrule(
        name = file + ".files",
        cmd = (("$(locations :generate_examples) " +
               " --zip_to_output {0} {1} $(@D)").format(file, flags)),
        outs = [file],
        # `exec_tools` is required for PY3 compatibility in place of `tools`.
        exec_tools = [
            ":generate_examples",
        ],
    )
    # Expose the generated zip under the caller-visible target name.
    native.filegroup(
        name = name,
        srcs = [file],
    )
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/build_def.bzl | Starlark | apache-2.0 | 5,748 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generates a series of test cases using MLIR-based conversion."""
# This is forked from `tensorflow/lite/testing/generate_examples.py`.
# TODO(b/136499575): Merge this back to TFLite codebase when open sourcing.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow.compat.v1 as tf
from tensorflow.lite.experimental.mlir.testing import mlir_convert
# pylint: disable=unused-import
from tensorflow.lite.experimental.mlir.testing.op_tests.batchmatmul import make_batchmatmul_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.broadcast_gradient_args import make_broadcast_gradient_args_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.broadcast_to import make_broadcast_to_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.complex_abs import make_complex_abs_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.cond import make_cond_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.control_dep import make_control_dep_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.conv3d import make_conv3d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.conv_bias_activation import make_conv_bias_relu6_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.cumsum import make_cumsum_tests
# Placeholder for make_dense_image_warp_tests import
from tensorflow.lite.experimental.mlir.testing.op_tests.dynamic_rnn import make_dynamic_rnn_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.einsum import make_einsum_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.identify_dilated_conv import make_identify_dilated_conv_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.identify_dilated_conv1d import make_identify_dilated_conv1d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.imag import make_imag_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.irfft2d import make_irfft2d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.is_finite import make_is_finite_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.max_pool_with_argmax import make_max_pool_with_argmax_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.parse_example import make_parse_example_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.real import make_real_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.reciprocal import make_reciprocal_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.rfft import make_rfft_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.rfft2d import make_rfft2d_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.segment_sum import make_segment_sum_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.shape_to_strided_slice import make_shape_to_strided_slice_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.softplus import make_softplus_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.static_hashtable import make_static_hashtable_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.static_rnn_with_control_flow_v2 import make_static_rnn_with_control_flow_v2_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.stft import make_stft_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_concat import make_tensor_list_concat_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_dynamic_shape import make_tensor_list_dynamic_shape_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_get_item import make_tensor_list_get_item_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_length import make_tensor_list_length_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_resize import make_tensor_list_resize_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.tensor_list_set_item import make_tensor_list_set_item_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.where_v2 import make_where_v2_tests
from tensorflow.lite.experimental.mlir.testing.op_tests.while_loop import make_while_tests
from tensorflow.lite.testing import generate_examples_lib
# Maps a regular expression over generated test names to the tracking bug id
# (http://b/<id>) explaining why MLIR-based conversion is expected to fail
# for matching tests. Passed to generate_examples_lib via options.known_bugs.
MLIR_CONVERTER_KNOWN_BUGS = {
    # We need to support dynamic_rnn case.
    r"unidirectional_sequence_rnn.*is_dynamic_rnn=True": "128997102",
    r"unidirectional_sequence_lstm.*is_dynamic_rnn=True": "128997102",
    # TODO(b/124314620): Test cases work with tf_tfl_translate binary
    # but not TFLiteConverter interface.
    # Concat & SpaceToDepth with uint8 doesn't work.
    r"concat.*type=tf\.uint8": "124314620",
    r"space_to_depth.*type=tf\.uint8": "124314620",
    r"l2norm.*fully_quantize=True": "134594898",
    # Below are not really a converter bug, but our kernels doesn't support
    # int64.
    r"div.*dtype=tf\.int64": "119126484",
    r"floor_div.*dtype=tf\.int64": "119126484",
    r"mul.*dtype=tf\.int64": "119126484",
    r"relu.*dtype=tf\.int64": "119126484",
    r"squared_difference.*dtype=tf\.int64": "119126484",
    # Post-training quantization support missing for below op in mlir.
    r"prelu.*fully_quantize=True": "156112683",
    # ResizeBilinear op kernel supports only float32 and quantized 8-bit
    # integers.
    r"resize_bilinear.*dtype=tf\.int32": "156569626",
}
# Disable GPU for now since we are just testing in TF against CPU reference
# value and creating non-device-specific graphs to export.
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

# Command-line interface. The single positional argument is the output
# directory; all other behavior is controlled by optional flags, parsed into
# the module-level FLAGS namespace in the __main__ block below.
parser = argparse.ArgumentParser(description="Script to generate TFLite tests.")
parser.add_argument("output_path",
                    help="Directory where the outputs will go.")
parser.add_argument(
    "--zip_to_output",
    type=str,
    help="Particular zip to output.",
    required=True)
parser.add_argument(
    "--known_bugs_are_errors",
    action="store_true",
    help=("If a particular model is affected by a known bug,"
          " count it as a converter error."))
parser.add_argument(
    "--ignore_converter_errors",
    action="store_true",
    # Bug fix: the previous help text ("Raise an exception if any converter
    # error is encountered.") described the opposite of this flag's effect.
    help="Continue past converter errors instead of raising an exception.")
parser.add_argument(
    "--save_graphdefs",
    action="store_true",
    help="Include intermediate graphdefs in the output zip files.")
parser.add_argument(
    "--run_with_flex",
    action="store_true",
    help="Whether the TFLite Flex converter is being used.")
parser.add_argument(
    "--make_edgetpu_tests",
    action="store_true",
    help="Whether to generate test cases for edgetpu.")
parser.add_argument(
    "--make_forward_compat_test",
    action="store_true",
    help="Make tests by setting TF forward compatibility horizon to the future")
parser.add_argument(
    "--test_sets",
    type=str,
    help=("Comma-separated list of test set names to generate. "
          "If not specified, a test set is selected by parsing the name of "
          "'zip_to_output' file."))
parser.add_argument(
    "--mlir_quantizer",
    action="store_true",
    help=("Whether the new MLIR quantizer is being used."))
def main(unused_args):
  """Builds an Options object from the parsed FLAGS and generates examples."""
  options = generate_examples_lib.Options()

  # Output locations.
  options.output_path = FLAGS.output_path
  options.zip_to_output = FLAGS.zip_to_output

  # Error-handling policy.
  options.known_bugs_are_errors = FLAGS.known_bugs_are_errors
  options.ignore_converter_errors = FLAGS.ignore_converter_errors
  options.known_bugs = MLIR_CONVERTER_KNOWN_BUGS

  # Conversion configuration: always route through the MLIR converter.
  options.save_graphdefs = FLAGS.save_graphdefs
  options.run_with_flex = FLAGS.run_with_flex
  options.make_edgetpu_tests = FLAGS.make_edgetpu_tests
  options.make_forward_compat_test = FLAGS.make_forward_compat_test
  options.tflite_convert_function = mlir_convert.mlir_convert
  options.use_experimental_converter = True
  options.mlir_quantizer = FLAGS.mlir_quantizer

  if FLAGS.test_sets:
    generate_examples_lib.generate_multi_set_examples(
        options, FLAGS.test_sets.split(","))
  else:
    generate_examples_lib.generate_examples(options)
if __name__ == "__main__":
  FLAGS, unparsed = parser.parse_known_args()

  if unparsed:
    # Bug fix: the original print had no argument for the %s placeholder, so
    # the literal "%s" was printed instead of the program name.
    print("Usage: %s <path out> <zip file to generate>" % sys.argv[0])
    sys.exit(1)
  else:
    tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/generate_examples.py | Python | apache-2.0 | 8,943 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <cstdarg>
#include <cstdio>
#include <cstdlib>
#include <fstream>
#include <map>
#include <sstream>
#include <gtest/gtest.h>
#include "re2/re2.h"
#include "tensorflow/lite/testing/parse_testdata.h"
#include "tensorflow/lite/testing/tflite_driver.h"
#include "tensorflow/lite/testing/util.h"
#include "tensorflow/core/lib/core/status_test_util.h"
#include "tensorflow/core/platform/env.h"
#include "tensorflow/core/platform/subprocess.h"
#include "tensorflow/core/util/command_line_flags.h"
namespace tflite {
namespace testing {
namespace {
// When true (the default), tests matching the known-bugs lists below are
// expected to fail; pass --ignore_known_bugs=false to verify bugs are fixed.
bool FLAGS_ignore_known_bugs = true;
// As archive file names are test-specific, no default is possible.
//
// This test supports input as both zip and tar, as a stock android image does
// not have unzip but does have tar.
string* FLAGS_zip_file_path = new string;
string* FLAGS_tar_file_path = new string;
#ifndef __ANDROID__
string* FLAGS_unzip_binary_path = new string("/usr/bin/unzip");
string* FLAGS_tar_binary_path = new string("/bin/tar");
#else
string* FLAGS_unzip_binary_path = new string("/system/bin/unzip");
string* FLAGS_tar_binary_path = new string("/system/bin/tar");
#endif
// NNAPI delegate selection; wired to command-line flags in main().
bool FLAGS_use_nnapi = false;
bool FLAGS_ignore_unsupported_nnapi = false;
}  // namespace
// TensorFlow system environment for file system called.
tensorflow::Env* env = tensorflow::Env::Default();
// List of tests that are expected to fail when
// --test_arg=--ignore_known_bugs=false
// Key is a substring of the test name and value is a bug number.
// TODO(ahentz): make sure we clean this list up frequently.
// Returns a map from test-name regex to the b/ bug id documenting why the
// matching tests are expected to fail. Constructed once and returned by
// reference to a function-local static (never destroyed).
const std::map<string, string>& GetKnownBrokenTests() {
  static const std::map<string, string>* const kBrokenTests =
      new std::map<string, string>({
          // SpaceToBatchND only supports 4D tensors.
          {R"(^\/space_to_batch_nd.*input_shape=\[1,4,4,4,1,1\])", "70848787"},
          // BatchToSpaceND only supports 4D tensors.
          {R"(^\/batch_to_space_nd.*input_shape=\[8,2,2,2,1,1\])", "70848787"},
          // ResizeBilinear looks completely incompatible with Tensorflow
          {R"(^\/resize_bilinear.*dtype=tf.int32)", "72401107"},
          // Select kernel doesn't support broadcasting yet.
          {R"(^\/where.*1,2,3,1)", "134692786"},
          {R"(^\/div.*dtype=tf\.int64)", "119126484"},
          {R"(^\/mul.*dtype=tf\.int64)", "119126484"},
          {R"(^\/floor_div.*dtype=tf\.int64)", "119126484"},
          {R"(^\/squared_difference.*dtype=tf\.int64)", "119126484"},
      });
  return *kBrokenTests;
}
// Additional list of tests that are expected to fail when
// --test_arg=--ignore_known_bugs=false
// and
// --test_arg=--use_nnapi=true
// Note that issues related to lack of NNAPI support for a particular op are
// handled separately; this list is specifically for broken cases where
// execution produces broken output.
// Key is a substring of the test name and value is a bug number.
// Returns a map from test-name regex to bug id for tests that produce broken
// output specifically under the NNAPI delegate. Merged into the generic
// known-broken map when --use_nnapi is set (see TEST_P below).
const std::map<string, string>& GetKnownBrokenNnapiTests() {
  static const std::map<string, string>* const kBrokenNnapiTests =
      new std::map<string, string>({
          // Certain NNAPI kernels silently fail with int32 types.
          {R"(^\/add.*dtype=tf\.int32)", "122987564"},
          {R"(^\/concat.*dtype=tf\.int32)", "122987564"},
          {R"(^\/mul.*dtype=tf\.int32)", "122987564"},
          {R"(^\/space_to_depth.*dtype=tf\.int32)", "122987564"},
          // Certain NNAPI fully_connected shape permutations fail.
          {R"(^\/fully_connected_constant_filter=True.*shape1=\[3,3\])",
           "122987564"},
          {R"(^\/fully_connected_constant_filter=True.*shape1=\[4,4\])",
           "122987564"},
          {R"(^\/fully_connected.*shape1=\[3,3\].*transpose_b=True)",
           "122987564"},
          {R"(^\/fully_connected.*shape1=\[4,4\].*shape2=\[4,1\])",
           "122987564"},
      });
  return *kBrokenNnapiTests;
}
// List of quantize tests that are probably to fail.
// Quantized tflite models has high diff error with tensorflow models.
// Key is a substring of the test name and value is a bug number.
// TODO(b/134594898): Remove these bugs and corresponding codes or move them to
// kBrokenTests after b/134594898 is fixed.
// Returns a map from test-name regex to bug id for fully-quantized tests
// whose outputs currently diverge too far from the TensorFlow reference.
// Unlike the maps above, a match here only suppresses failures (see the
// fully_quantize branch of the TEST_P body).
const std::map<string, string>& GetKnownQuantizeBrokenTests() {
  static const std::map<string, string>* const kQuantizeBrokenTests =
      new std::map<string, string>({
          {R"(^\/conv.*fully_quantize=True)", "134594898"},
          {R"(^\/depthwiseconv.*fully_quantize=True)", "134594898"},
          {R"(^\/sum.*fully_quantize=True)", "134594898"},
          {R"(^\/l2norm.*fully_quantize=True)", "134594898"},
          {R"(^\/prelu.*fully_quantize=True)", "156112683"},
      });
  return *kQuantizeBrokenTests;
}
// Allows test data to be unarchived into a temporary directory and makes
// sure those temporary directories are removed later. Registered with gtest
// via AddGlobalTestEnvironment() in main(), which owns teardown timing.
class ArchiveEnvironment : public ::testing::Environment {
 public:
  ~ArchiveEnvironment() override {}
  // Delete all temporary directories on teardown.
  void TearDown() override {
    for (const auto& dir : temporary_directories_) {
      tensorflow::int64 undeleted_dirs, undeleted_files;
      TF_CHECK_OK(
          env->DeleteRecursively(dir, &undeleted_dirs, &undeleted_files));
    }
    temporary_directories_.clear();
  }
  // Unarchive `archive` file into a new temporary directory `out_dir`.
  // Shells out to unzip (when `zip` is non-empty, which takes precedence)
  // or tar, and blocks until the subprocess exits.
  tensorflow::Status UnArchive(const string& zip, const string& tar,
                               string* out_dir) {
    string dir;
    TF_CHECK_OK(MakeTemporaryDirectory(&dir));
    tensorflow::SubProcess proc;
    if (!zip.empty()) {
      string unzip_binary = *FLAGS_unzip_binary_path;
      TF_CHECK_OK(env->FileExists(unzip_binary));
      TF_CHECK_OK(env->FileExists(zip));
      proc.SetProgram(unzip_binary, {"unzip", "-d", dir, zip});
    } else {
      string tar_binary = *FLAGS_tar_binary_path;
      TF_CHECK_OK(env->FileExists(tar_binary));
      TF_CHECK_OK(env->FileExists(tar));
      // 'o' needs to be explicitly set on Android so that
      // untarring works as non-root (otherwise tries to chown
      // files, which fails)
      proc.SetProgram(tar_binary, {"tar", "xfo", tar, "-C", dir});
    }
    proc.SetChannelAction(tensorflow::CHAN_STDOUT, tensorflow::ACTION_PIPE);
    proc.SetChannelAction(tensorflow::CHAN_STDERR, tensorflow::ACTION_PIPE);
    if (!proc.Start())
      return tensorflow::Status(tensorflow::error::UNKNOWN,
                                "unzip couldn't start");
    string out, err;
    // Communicate() waits for the child and captures its output so it can
    // be surfaced in the error message on failure.
    int status = proc.Communicate(nullptr, &out, &err);
    if (WEXITSTATUS(status) == 0) {
      *out_dir = dir;
      return tensorflow::Status::OK();
    } else {
      return tensorflow::Status(tensorflow::error::UNKNOWN,
                                "unzip failed. "
                                "stdout:\n" +
                                    out + "\nstderr:\n" + err);
    }
  }
 private:
  // Make a temporary directory and return its name in `temporary`.
  // The directory is remembered so TearDown() can delete it.
  tensorflow::Status MakeTemporaryDirectory(string* temporary) {
    if (env->LocalTempFilename(temporary)) {
      TF_CHECK_OK(env->CreateDir(*temporary));
      temporary_directories_.push_back(*temporary);
      return tensorflow::Status::OK();
    }
    return tensorflow::Status(tensorflow::error::UNKNOWN,
                              "make temporary directory failed");
  }
  // Directories created by MakeTemporaryDirectory(), pending deletion.
  std::vector<string> temporary_directories_;
};
// Return the singleton archive_environment.
// Returns the process-wide ArchiveEnvironment singleton. Intentionally
// leaked; once registered via AddGlobalTestEnvironment() in main(), gtest
// drives its TearDown().
ArchiveEnvironment* archive_environment() {
  static ArchiveEnvironment* const instance = new ArchiveEnvironment;
  return instance;
}
// Read the manifest.txt out of the unarchived archive file. Specifically
// `original_file` is the original zip file for error messages. `dir` is
// the temporary directory where the archive file has been unarchived and
// `test_paths` is the list of test prefixes that were in the manifest.
// Note, it is an error for a manifest to contain no tests.
// Reads the newline-delimited manifest.txt out of `dir` (the unarchived
// archive) and appends "<dir>/<entry>" for each entry to `test_paths`.
// `original_file` is only used in the error message. A manifest with no
// entries is an error.
tensorflow::Status ReadManifest(const string& original_file, const string& dir,
                                std::vector<string>* test_paths) {
  // Slurp the whole manifest, then split on '\n'. A trailing entry without a
  // terminating newline is ignored, matching the original behavior.
  std::ifstream manifest_fp(dir + "/manifest.txt");
  string manifest((std::istreambuf_iterator<char>(manifest_fp)),
                  std::istreambuf_iterator<char>());
  size_t begin = 0;
  int num_entries = 0;
  for (size_t newline = manifest.find('\n', begin); newline != string::npos;
       newline = manifest.find('\n', begin)) {
    test_paths->push_back(dir + "/" + manifest.substr(begin, newline - begin));
    begin = newline + 1;
    ++num_entries;
  }
  if (num_entries == 0) {
    string message = "Test had no examples: " + original_file;
    return tensorflow::Status(tensorflow::error::UNKNOWN, message);
  }
  return tensorflow::Status::OK();
}
// Get a list of tests from either zip or tar file
std::vector<string> UnarchiveAndFindTestNames(const string& zip_file,
const string& tar_file) {
if (zip_file.empty() && tar_file.empty()) {
TF_CHECK_OK(tensorflow::Status(tensorflow::error::UNKNOWN,
"Neither zip_file nor tar_file was given"));
}
string decompress_tmp_dir;
TF_CHECK_OK(archive_environment()->UnArchive(zip_file, tar_file,
&decompress_tmp_dir));
std::vector<string> stuff;
if (!zip_file.empty()) {
TF_CHECK_OK(ReadManifest(zip_file, decompress_tmp_dir, &stuff));
} else {
TF_CHECK_OK(ReadManifest(tar_file, decompress_tmp_dir, &stuff));
}
return stuff;
}
class OpsTest : public ::testing::TestWithParam<string> {};
// Runs one generated test file end-to-end through the TFLite interpreter and
// compares against the recorded TensorFlow reference values. The parameter
// is "<test path>[ <label>]"; the label defaults to the path when no space
// separator is present.
TEST_P(OpsTest, RunZipTests) {
  string test_path_and_label = GetParam();
  string test_path = test_path_and_label;
  string label = test_path_and_label;
  size_t end_pos = test_path_and_label.find(' ');
  if (end_pos != string::npos) {
    test_path = test_path_and_label.substr(0, end_pos);
    label = test_path_and_label.substr(end_pos + 1);
  }
  string tflite_test_case = test_path + "_tests.txt";
  string tflite_dir = test_path.substr(0, test_path.find_last_of('/'));
  string test_name = label.substr(label.find_last_of('/'));
  std::ifstream tflite_stream(tflite_test_case);
  ASSERT_TRUE(tflite_stream.is_open()) << tflite_test_case;
  tflite::testing::TfLiteDriver test_driver(
      FLAGS_use_nnapi ? TfLiteDriver::DelegateType::kNnapi
                      : TfLiteDriver::DelegateType::kNone);
  // Fully-quantized tests compare with a looser tolerance and use the
  // quantize-specific known-broken list below.
  bool fully_quantize = false;
  if (label.find("fully_quantize=True") != std::string::npos) {
    // TODO(b/134594898): Tighten this constraint.
    test_driver.SetThreshold(0.2, 0.1);
    fully_quantize = true;
  }
  test_driver.SetModelBaseDir(tflite_dir);
  // Merge NNAPI-specific expected failures into the generic list when the
  // NNAPI delegate is enabled.
  auto broken_tests = GetKnownBrokenTests();
  if (FLAGS_use_nnapi) {
    auto kBrokenNnapiTests = GetKnownBrokenNnapiTests();
    broken_tests.insert(kBrokenNnapiTests.begin(), kBrokenNnapiTests.end());
  }
  auto quantize_broken_tests = GetKnownQuantizeBrokenTests();
  bool result = tflite::testing::ParseAndRunTests(&tflite_stream, &test_driver);
  string message = test_driver.GetErrorMessage();
  if (!fully_quantize) {
    // Look up whether this test matches a known-bug entry.
    string bug_number;
    for (const auto& p : broken_tests) {
      if (RE2::PartialMatch(test_name, p.first)) {
        bug_number = p.second;
        break;
      }
    }
    if (bug_number.empty()) {
      if (FLAGS_use_nnapi && FLAGS_ignore_unsupported_nnapi && !result) {
        EXPECT_EQ(message, string("Failed to invoke interpreter")) << message;
      } else {
        EXPECT_TRUE(result) << message;
      }
    } else {
      if (FLAGS_ignore_known_bugs) {
        // Known-broken tests are expected to keep failing until the bug is
        // fixed, at which point this assertion reminds us to delist them.
        EXPECT_FALSE(result) << "Test was expected to fail but is now passing; "
                                "you can mark http://b/"
                             << bug_number << " as fixed! Yay!";
      } else {
        EXPECT_TRUE(result)
            << message << ": Possibly due to http://b/" << bug_number;
      }
    }
  } else {
    if (!result) {
      string bug_number;
      // See if the tests are potential quantize failures.
      for (const auto& p : quantize_broken_tests) {
        if (RE2::PartialMatch(test_name, p.first)) {
          bug_number = p.second;
          break;
        }
      }
      EXPECT_FALSE(bug_number.empty());
    }
  }
}
// Functor that turns a zip test path into a valid gtest parameter name:
// keeps only the path's basename and replaces every character that is not
// alphanumeric or '_' with '_'.
struct ZipPathParamName {
  template <class ParamType>
  string operator()(const ::testing::TestParamInfo<ParamType>& info) const {
    string param_name = info.param;
    size_t last_slash = param_name.find_last_of("\\/");
    if (last_slash != string::npos) {
      param_name = param_name.substr(last_slash);
    }
    for (size_t index = 0; index < param_name.size(); ++index) {
      // Bug fix: cast to unsigned char before calling isalnum(). Passing a
      // negative char value (possible for bytes >= 0x80 when char is signed)
      // is undefined behavior per the C standard.
      if (!isalnum(static_cast<unsigned char>(param_name[index])) &&
          param_name[index] != '_') {
        param_name[index] = '_';
      }
    }
    return param_name;
  }
};
// Instantiates RunZipTests once per manifest entry of the archive given on
// the command line; ZipPathParamName sanitizes each path into a legal gtest
// parameter name. NOTE(review): INSTANTIATE_TEST_CASE_P is the deprecated
// spelling of INSTANTIATE_TEST_SUITE_P in newer googletest — confirm the
// bundled gtest version before migrating.
INSTANTIATE_TEST_CASE_P(tests, OpsTest,
                        ::testing::ValuesIn(UnarchiveAndFindTestNames(
                            *FLAGS_zip_file_path, *FLAGS_tar_file_path)),
                        ZipPathParamName());
} // namespace testing
} // namespace tflite
int main(int argc, char** argv) {
  // Register the archive environment first so gtest tears down the temporary
  // extraction directories after all tests have run.
  ::testing::AddGlobalTestEnvironment(tflite::testing::archive_environment());
  // Wire the command-line flags to the file-scope FLAGS_* globals declared
  // in the anonymous namespace above.
  std::vector<tensorflow::Flag> flags = {
      tensorflow::Flag(
          "ignore_known_bugs", &tflite::testing::FLAGS_ignore_known_bugs,
          "If a particular model is affected by a known bug, the "
          "corresponding test should expect the outputs to not match."),
      tensorflow::Flag(
          "tar_file_path", tflite::testing::FLAGS_tar_file_path,
          "Required (or zip_file_path): Location of the test tar file."),
      tensorflow::Flag(
          "zip_file_path", tflite::testing::FLAGS_zip_file_path,
          "Required (or tar_file_path): Location of the test zip file."),
      tensorflow::Flag("unzip_binary_path",
                       tflite::testing::FLAGS_unzip_binary_path,
                       "Location of a suitable unzip binary."),
      tensorflow::Flag("tar_binary_path",
                       tflite::testing::FLAGS_tar_binary_path,
                       "Location of a suitable tar binary."),
      tensorflow::Flag("use_nnapi", &tflite::testing::FLAGS_use_nnapi,
                       "Whether to enable the NNAPI delegate"),
      tensorflow::Flag("ignore_unsupported_nnapi",
                       &tflite::testing::FLAGS_ignore_unsupported_nnapi,
                       "Don't fail tests just because delegation to NNAPI "
                       "is not possible")};
  bool success = tensorflow::Flags::Parse(&argc, argv, flags);
  if (!success || (argc == 2 && !strcmp(argv[1], "--helpfull"))) {
    fprintf(stderr, "%s", tensorflow::Flags::Usage(argv[0], flags).c_str());
    return 1;
  }
  ::tflite::LogToStderr();
  // TODO(mikie): googletest arguments do not work - maybe the tensorflow flags
  // parser removes them?
  ::testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/generated_examples_zip_test.cc | C++ | apache-2.0 | 15,593 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converts a model's graph def into a tflite model with MLIR-based conversion."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.python import test_util as tflite_test_util
from tensorflow.lite.testing import zip_test_utils
from tensorflow.python.platform import resource_loader
def mlir_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
  """Convert a model's graph def into a tflite model with MLIR-based conversion.

  Args:
    options: A lite.testing.generate_examples_lib.Options instance.
    graph_def: A GraphDef object.
    input_tensors: List of input tensor tuples `(name, shape, type)`.
    output_tensors: List of output tensors (names).
    **kwargs: Extra parameters. Recognized keys: "test_params" (dict of
      per-test conversion knobs) and "extra_toco_options"
      (zip_test_utils.ExtraTocoOptions).

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  test_params = kwargs.get("test_params", {})
  # TODO(b/146025965): Rename ExtraTocoOptions to ExtraConvertOptions or
  # something else.
  extra_toco_options = kwargs.get("extra_toco_options",
                                  zip_test_utils.ExtraTocoOptions())
  input_arrays = [x[0] for x in input_tensors]
  input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)
  tflite_model = None
  log = ""
  # Serialize the graph to a temp file so the frozen-graph converter API can
  # load it; the file lives only for the duration of the conversion.
  with tempfile.NamedTemporaryFile() as graphdef_file:
    graphdef_file.write(graph_def.SerializeToString())
    graphdef_file.flush()
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        graphdef_file.name, input_arrays, output_tensors, input_shapes)
    converter.allow_custom_ops = extra_toco_options.allow_custom_ops
    converter.experimental_new_quantizer = options.mlir_quantizer
    if options.run_with_flex:
      converter.supported_ops = set([
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS])
    if test_params.get("dynamic_range_quantize", False):
      converter.optimizations = [tf.lite.Optimize.DEFAULT]
    if test_params.get("fully_quantize", False):
      converter.optimizations = [tf.lite.Optimize.DEFAULT]
      # Read the input range for the representative dataset from parameters.
      min_value, max_value = test_params.get("input_range", (-1, 1))
      def representative_dataset(input_tensors):
        # One random sample per input. NOTE(review): `dim.value` implies the
        # shapes here are TensorShape-like; None (dynamic) dimensions are
        # calibrated at size 1 — confirm against the callers.
        calibration_inputs = []
        for _, shape, _ in input_tensors:
          if shape:
            dims = [1 if dim.value is None else dim.value for dim in shape.dims]
            calibration_inputs.append(
                np.random.uniform(min_value, max_value,
                                  tuple(dims)).astype(np.float32))
        return calibration_inputs
      def representative_dataset_gen():
        # 100 calibration batches per conversion.
        for _ in range(100):
          yield representative_dataset(input_tensors)
      if test_params.get("quant_16x8", False):
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.\
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
      else:
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]
      converter.representative_dataset = representative_dataset_gen
      if extra_toco_options.inference_input_type:
        converter.inference_input_type = (
            extra_toco_options.inference_input_type)
      if extra_toco_options.inference_output_type:
        converter.inference_output_type = (
            extra_toco_options.inference_output_type)
    try:
      tflite_model = converter.convert()
      if options.expected_ops_in_converted_model:
        ops_list = tflite_test_util.get_ops_list(tflite_model)
        for expected_op in options.expected_ops_in_converted_model:
          if expected_op not in ops_list:
            # Force the test to fail.
            tflite_model = None
            raise ValueError(
                "{} op not found in the converted model".format(expected_op))
    except Exception as e:  # pylint: disable=broad-except
      # Any conversion failure is reported through the returned log rather
      # than propagated.
      log = str(e)
  return tflite_model, log
def mlir_convert_file(graph_def_filename,
                      input_tensors,
                      output_tensors,
                      quantization_params=None,
                      additional_flags=""):
  """Convert a graphdef file into a tflite model with MLIR-based conversion.

  NOTE: this currently shells out to the tf_tfl_translate binary, but we would
  like to convert to Python API tooling in the future.

  Args:
    graph_def_filename: A GraphDef file.
    input_tensors: List of input tensor tuples `(name, shape, type)`. name
      should be a string. shape should be a tuple of integers. type should be a
      string, for example 'DT_FLOAT'
    output_tensors: List of output tensors (names).
    quantization_params: parameters `(inference_type, min_values, max_values)`
      to quantize the model.
    additional_flags: A string of additional command line flags to be passed
      to MLIR converter.

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  bin_path = resource_loader.get_path_to_datafile(
      "../../../../compiler/mlir/lite/tf_tfl_translate")
  with tempfile.NamedTemporaryFile() as output_file, \
       tempfile.NamedTemporaryFile("w+") as stdout_file:
    input_shapes = []
    for input_tensor in input_tensors:
      shape = input_tensor[1]
      input_shapes.append(",".join([str(dim) for dim in shape]))
    input_shapes_str = ":".join(input_shapes)
    input_types = ",".join([x[2] for x in input_tensors])
    quant_flags = ""
    if quantization_params is not None:
      min_vals = ",".join([str(val) for val in quantization_params[1]])
      max_vals = ",".join([str(val) for val in quantization_params[2]])
      quant_flags = ("-tf-inference-type=" + quantization_params[0] +
                     " -tf-input-min-values='" + min_vals +
                     "' -tf-input-max-values='" + max_vals + "' " +
                     "-emit-quant-adaptor-ops ")
    cmd = ("%s -tf-input-arrays=%s -tf-input-data-types=%s -tf-input-shapes=%s "
           "-tf-output-arrays=%s " + quant_flags + additional_flags +
           "%s -o %s")
    cmd = cmd % (
        bin_path,
        ",".join([x[0] for x in input_tensors]),
        input_types,
        input_shapes_str,
        ",".join(output_tensors),
        graph_def_filename,
        output_file.name,
    )
    # Bug fix: redirect the converter's stdout/stderr into `stdout_file` so
    # the returned log actually contains the converter output. Previously
    # nothing was redirected and the log's output section was always empty.
    exit_code = os.system("%s > %s 2>&1" % (cmd, stdout_file.name))
    # Bug fix: a space was missing between the command and "exited".
    log = (
        cmd + " exited with code %d" % exit_code + "\n------------------\n" +
        stdout_file.read())
    return (None if exit_code != 0 else output_file.read()), log
def mlir_convert_saved_model(saved_model_dir,
                             is_signature_def_saved_model,
                             tags=(),
                             exported_names=(),
                             additional_flags=""):
  """Convert a saved_model into a tflite model with MLIR-based conversion.

  Shells out to the `tf_tfl_translate` binary; the resulting flatbuffer is
  read back from a temporary file.

  Args:
    saved_model_dir: Saved model dir.
    is_signature_def_saved_model: Whether the SavedModel SignatureDef importer
      or ObjectGraph importer should be used.
    tags: Set of tags identifying the MetaGraphDef within the SavedModel to
      analyze. All tags in the tag set must be present.
    exported_names: Names to export from SavedModel.
    additional_flags: A string of additional command line flags to be passed to
      MLIR converter.

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  bin_path = resource_loader.get_path_to_datafile(
      "../../../../compiler/mlir/lite/tf_tfl_translate")
  with tempfile.NamedTemporaryFile() as output_file, \
       tempfile.NamedTemporaryFile("w+") as stdout_file:
    tags_str = ",".join(tags)
    exported_names_str = ",".join(exported_names)
    saved_model_flag = "-savedmodel-objectgraph-to-mlir"
    if is_signature_def_saved_model:
      saved_model_flag = "-savedmodel-signaturedefs-to-mlir"
    # Format the fixed parts of the command first and splice in
    # `additional_flags` verbatim afterwards. The previous code embedded
    # `additional_flags` inside the %-format template, so any literal '%'
    # in the caller-supplied flags crashed the %-formatting step.
    prefix = ("%s %s --tf-savedmodel-tags=%s "
              "--tf-savedmodel-exported-names=%s" % (
                  bin_path,
                  saved_model_flag,
                  tags_str,
                  exported_names_str,
              ))
    suffix = "%s --o=%s" % (saved_model_dir, output_file.name)
    cmd = prefix + " " + additional_flags + " " + suffix
    exit_code = os.system(cmd)
    # NOTE(review): `stdout_file` is never wired into `cmd`, so its contents
    # are always empty and the converter's own output is not captured in the
    # log. Kept as-is to preserve behavior -- confirm whether redirecting
    # stdout/stderr into it was intended.
    log = (
        # Fixed: a separator was missing between the command and the message,
        # producing logs like "...--o=/tmp/xyzexited with code 0".
        cmd + " exited with code %d" % exit_code + "\n------------------\n" +
        stdout_file.read())
    return (None if exit_code != 0 else output_file.read()), log
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/mlir_convert.py | Python | apache-2.0 | 9,267 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for batchmatmul."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function("make_batchmatmul_tests")
def make_batchmatmul_tests(options):
  """Make a set of tests to do basic batch matrix multiply.

  Each entry of "shapes" is a 4-tuple:
    (lhs placeholder shape, rhs placeholder shape,
     lhs concrete feed shape, rhs concrete feed shape).
  The placeholder shapes may contain None for dynamic dimensions; the
  concrete shapes are what is actually fed at run time.
  """
  test_parameters = [
      {
          "dtype": [tf.float32],
          "shapes": [((3, 4, 7), (7, 9), (3, 4, 7), (7, 9)),
                     ((None, 4, 5), (None, 5, 6), (3, 4, 5), (3, 5, 6)),
                     ((None, 1, 3, 4), (None, 4, 2), (2, 1, 3, 4), (5, 4, 2))],
          "adjoint_b": [False, True],
          "adjoint_a": [False, True],
          "rhs_constant": [False],
          "fully_quantize": [False, True],
      },
  ]

  def swap_last_two_dims(*args):
    """Return a tuple with the last two dimensions swapped."""
    return args[:-2] + (args[-1],) + (args[-2],)

  def build_graph(parameters):
    """Build a simple graph with BatchMatMul."""
    placeholder0_shape = parameters["shapes"][0]
    adj_a = parameters["adjoint_a"]
    adj_b = parameters["adjoint_b"]
    rhs_constant = parameters["rhs_constant"]
    # When an operand is adjointed, matmul expects it pre-transposed, so the
    # placeholder/constant is built with its last two dims swapped.
    if adj_a:
      placeholder0_shape = swap_last_two_dims(*placeholder0_shape)
    input0_tensor = tf.compat.v1.placeholder(
        dtype=parameters["dtype"], shape=placeholder0_shape)
    if rhs_constant:
      # RHS baked into the graph as a constant: only the LHS is a graph input.
      if adj_b:
        constant1_shape = swap_last_two_dims(*parameters["shapes"][3])
      else:
        constant1_shape = parameters["shapes"][3]
      data = create_tensor_data(
          parameters["dtype"], constant1_shape, min_value=-1.0, max_value=1.0)
      input1_constant = tf.constant(
          data, shape=constant1_shape, dtype=parameters["dtype"])
      out = tf.matmul(
          input0_tensor, input1_constant, adjoint_a=adj_a, adjoint_b=adj_b)
      return [input0_tensor], [out]
    else:
      # RHS is a second placeholder: both operands are graph inputs.
      if adj_b:
        placeholder1_shape = swap_last_two_dims(*parameters["shapes"][1])
      else:
        placeholder1_shape = parameters["shapes"][1]
      input1_tensor = tf.compat.v1.placeholder(
          dtype=parameters["dtype"], shape=placeholder1_shape)
      out = tf.matmul(
          input0_tensor, input1_tensor, adjoint_a=adj_a, adjoint_b=adj_b)
      return [input0_tensor, input1_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""
    # Concrete feed shapes come from shapes[2]/shapes[3], mirroring the
    # adjoint handling performed in build_graph.
    input0_shape = parameters["shapes"][2]
    adj_a = parameters["adjoint_a"]
    adj_b = parameters["adjoint_b"]
    rhs_constant = parameters["rhs_constant"]
    if adj_a:
      input0_shape = swap_last_two_dims(*input0_shape)
    input0_value = create_tensor_data(
        parameters["dtype"], input0_shape, min_value=-1.0, max_value=1.0)
    if rhs_constant:
      # RHS is a constant in the graph, so only the LHS value is fed.
      output_values = sess.run(
          outputs, feed_dict=dict(zip(inputs, [input0_value])))
      return [input0_value], output_values
    else:
      input1_shape = parameters["shapes"][3] if not adj_b else \
          swap_last_two_dims(*parameters["shapes"][3])
      input1_value = create_tensor_data(
          parameters["dtype"], input1_shape, min_value=-1.0, max_value=1.0)
      output_values = sess.run(
          outputs, feed_dict=dict(zip(inputs, [input0_value, input1_value])))
      return [input0_value, input1_value], output_values

  options.use_experimental_converter = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/batchmatmul.py | Python | apache-2.0 | 4,410 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for broadcast_gradient_args."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_broadcast_gradient_args_tests(options):
  """Make a set of tests to do broadcast_gradient_args."""
  test_parameters = [{
      'input_case': ['ALL_EQUAL', 'ONE_DIM', 'NON_BROADCASTABLE'],
      'dtype': [tf.dtypes.int32, tf.dtypes.int64],
  }]

  def build_graph(parameters):
    """Build a graph with a single BroadcastGradientArgs op."""
    shape0 = tf.compat.v1.placeholder(
        dtype=parameters['dtype'], name='input1')
    shape1 = tf.compat.v1.placeholder(
        dtype=parameters['dtype'], name='input2')
    grad0, grad1 = tf.raw_ops.BroadcastGradientArgs(s0=shape0, s1=shape1)
    return [shape0, shape1], [grad0, grad1]

  def build_inputs(parameters, sess, inputs, outputs):
    """Pick the shape pair for the test case and evaluate the graph."""
    dtype = parameters['dtype'].as_numpy_dtype()
    # The first operand is always [2, 4, 1, 3]; the second varies per case.
    second_operand = {
        'ALL_EQUAL': [2, 4, 1, 3],
        'ONE_DIM': [2, 1, 1, 3],
        'NON_BROADCASTABLE': [2, 5, 1, 3],
    }[parameters['input_case']]
    values = [
        np.array([2, 4, 1, 3], dtype=dtype),
        np.array(second_operand, dtype=dtype),
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_toco_options,
      expected_tf_failures=2)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/broadcast_gradient_args.py | Python | apache-2.0 | 2,681 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for broadcast_to."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function("make_broadcast_to_tests")
def make_broadcast_to_tests(options):
  """Make a set of tests to do broadcast_to."""
  # Each parameter group pairs input shapes with target broadcast shapes.
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 2], [2, 3, 4], [1], [2, 5, 2, 3, 4]],
      "output_shape": [[3, 1, 2], [5, 2, 3, 4], [10, 10],
                       [1, 2, 1, 2, 5, 2, 3, 4]],
  }, {
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[3, 2, 3, 4, 5, 6, 7, 8]],
      "output_shape": [[3, 2, 3, 4, 5, 6, 7, 8]],
  }, {
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[1, 3, 1, 2, 1, 4, 1, 1]],
      "output_shape": [[2, 3, 1, 2, 2, 4, 1, 1]],
  }, {
      "input_dtype": [tf.float32, tf.int32],
      "input_shape": [[2, 1, 1, 2, 1, 4, 1, 1]],
      "output_shape": [[2, 3, 2, 2, 2, 4, 1, 1]],
  }]

  def build_graph(parameters):
    """Build a graph holding a single broadcast_to op."""
    source = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    broadcasted = tf.broadcast_to(source, shape=parameters["output_shape"])
    return [source], [broadcasted]

  def build_inputs(parameters, sess, inputs, outputs):
    """Generate one random input tensor and run the graph on it."""
    feed = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [feed], sess.run(outputs, feed_dict=dict(zip(inputs, [feed])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=16)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/broadcast_to.py | Python | apache-2.0 | 2,671 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for complex abs."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_complex_abs_tests(options):
  """Make a set of tests to do complex abs."""
  # One parameter group per complex precision; Tout is the matching real type.
  test_parameters = [{
      "dtype": [tf.complex64],
      "input_shape": [[], [1], [2, 3], [1, 3, 4, 3], [2, 2, 3, 4, 5, 6]],
      "Tout": [tf.float32]
  }, {
      "dtype": [tf.complex128],
      "input_shape": [[], [1], [2, 3], [1, 3, 4, 3], [2, 2, 3, 4, 5, 6]],
      "Tout": [tf.float64]
  }]

  def build_graph(parameters):
    """Build a graph with a single ComplexAbs op."""
    complex_in = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    magnitude = tf.raw_ops.ComplexAbs(x=complex_in, Tout=parameters["Tout"])
    return [complex_in], [magnitude]

  def build_inputs(parameters, sess, inputs, outputs):
    """Draw random complex data in [-10, 10] and evaluate the graph."""
    data = create_tensor_data(
        parameters["dtype"].as_numpy_dtype,
        parameters["input_shape"],
        min_value=-10,
        max_value=10)
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/complex_abs.py | Python | apache-2.0 | 2,208 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for cond."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util
@register_make_test_function("make_cond_tests")
@test_util.enable_control_flow_v2
def make_cond_tests(options):
  """Make a set of tests to do cond."""
  # (Docstring fixed: it previously said "relu1", a copy-paste error.)
  # Chose a set of parameters
  test_parameters = [{
      # Note: The `tf.string` test case also serves as a regression test to
      # ensure that branch subgraph with dynamically allocated inputs/outputs
      # are handled correctly.
      "dtype": [tf.float32, tf.string],
      "pred": [False, True],
  }]

  def build_graph(parameters):
    """Build the graph for cond tests."""
    input1 = tf.placeholder(dtype=parameters["dtype"], shape=(1,))
    input2 = tf.placeholder(dtype=parameters["dtype"], shape=(1,))
    # MLIR TFLite converter can't handle scalar inputs. This is a workaround
    # to input (1,) tensors and then reshape to scalar.
    # TODO(b/129003347): Remove the workaround after scalar inputs are
    # supported.
    pred = tf.placeholder(dtype=tf.bool, shape=(1,))
    pred_scalar = tf.reshape(pred, ())
    out = tf.cond(pred_scalar, lambda: input1, lambda: input2)
    return [input1, input2, pred], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed two candidate values plus the boolean predicate."""
    input_values = [
        create_tensor_data(parameters["dtype"], (1,)),
        create_tensor_data(parameters["dtype"], (1,)),
        # `np.bool` was a deprecated alias for the builtin `bool` and was
        # removed in NumPy 1.24; use the canonical `np.bool_` dtype instead.
        np.array([parameters["pred"]], dtype=np.bool_),
    ]
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/cond.py | Python | apache-2.0 | 2,660 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for control_dep."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
TEST_INPUT_DEPTH = 3
@register_make_test_function()
def make_control_dep_tests(options):
  """Make a set of tests that use control dependencies."""
  test_parameters = [{
      "input_shape": [[], [1, 1, 1, 1], [1, 15, 14, 1], [3, 15, 14, 3]],
  }]

  def build_graph(parameters):
    """Conv2D whose execution is gated behind an assert control dep."""
    data = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    conv_filter = tf.zeros((3, 3, TEST_INPUT_DEPTH, 8), tf.float32)
    # Trivially-true assertion; its only purpose is creating the dependency.
    check = tf.compat.v1.assert_greater_equal(data, data - 1)
    with tf.control_dependencies([check]):
      result = tf.nn.conv2d(
          data, conv_filter, strides=(1, 1, 1, 1), padding="SAME")
    return [data], [result]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = create_tensor_data(tf.float32, parameters["input_shape"])
    return [feed], sess.run(outputs, feed_dict=dict(zip(inputs, [feed])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=3)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/control_dep.py | Python | apache-2.0 | 2,236 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for exp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_conv3d_tests(options):
  """Make a set of tests to do conv3d."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[2, 3, 4, 5, 3], [2, 5, 6, 8, 3]],
      "filter_shape": [[2, 2, 2, 3, 2], [1, 2, 2, 3, 2]],
      "strides": [(1, 1, 1, 1, 1), (1, 1, 1, 2, 1), (1, 1, 2, 2, 1),
                  (1, 2, 1, 2, 1), (1, 2, 2, 2, 1)],
      "dilations": [(1, 1, 1, 1, 1)],
      "padding": ["SAME", "VALID"],
  }]

  def build_graph(parameters):
    """Build a graph with one 3-D convolution."""
    volume = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    kernel = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="filter",
        shape=parameters["filter_shape"])
    result = tf.nn.conv3d(
        volume,
        kernel,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"])
    return [volume, kernel], [result]

  def build_inputs(parameters, sess, inputs, outputs):
    """Random volume data in [-100, 9] and kernel weights in [-3, 3]."""
    feeds = [
        create_tensor_data(
            parameters["input_dtype"],
            parameters["input_shape"],
            min_value=-100,
            max_value=9),
        create_tensor_data(
            parameters["input_dtype"],
            parameters["filter_shape"],
            min_value=-3,
            max_value=3),
    ]
    return feeds, sess.run(outputs, feed_dict=dict(zip(inputs, feeds)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/conv3d.py | Python | apache-2.0 | 2,718 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for conv followed with bias Add and activations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def make_conv_bias_activation_tests(activation_op):
  """Make a set of tests to do convolution with activation and bias.

  This test will create multiple consecutive convolutions with NCHW layout to
  make sure that the transformations to NHWC works as expected. Note this
  doesn't check any performance so manual checking of the generated model is
  advised.

  Args:
    activation_op: The activation op to be used in the test.

  Returns:
    The function that creates the test.
  """

  def create_test(options):
    """Actual function that generates examples."""
    test_parameters = [
        {
            "input_shape": [[1, 3, 4, 3]],
            "filter_shape": [[2, 3], [3, 3]],
            "filter_2_shape": [[2, 1, 1, 3]],
            "strides": [[1, 1, 1, 1]],
            "dilations": [[1, 1, 1, 1]],
            "data_format": ["NCHW"],
            "channel_multiplier": [1, 2],
            "fully_quantize": [False],
            "dynamic_range_quantize": [False],
        },
    ]

    def get_tensor_shapes(parameters):
      # Full filter shape = spatial dims + [in_channels, channel_multiplier].
      input_shape = parameters["input_shape"]
      filter_size = parameters["filter_shape"]
      filter_shape = filter_size + [
          input_shape[3], parameters["channel_multiplier"]
      ]
      return [input_shape, filter_shape]

    # TF CPU doesn't support cases with NCHW. Instead
    # use XLA which doesn't have the same restrictions.
    @tf.function(jit_compile=True)
    def add_conv(input_tensor, filter_input, parameters):
      out = tf.nn.conv2d(
          input_tensor,
          filter_input,
          strides=parameters["strides"],
          dilations=parameters["dilations"],
          padding="VALID",
          data_format=parameters["data_format"])
      return out

    def add_bias_add(data_input, filter_shape):
      # Bias length matches the conv output channel count (filter_shape[-1]).
      bias_input = create_tensor_data(np.float32, (filter_shape[-1],))
      out = tf.nn.bias_add(data_input, bias_input, data_format="NHWC")
      return out

    def build_graph(parameters):
      """Build a conv graph given `parameters`."""
      input_shape, filter_shape = get_tensor_shapes(parameters)
      input_tensor = tf.compat.v1.placeholder(
          dtype=tf.float32, name="input", shape=input_shape)
      # Filter weights are baked in as constants; only the image is an input.
      filter_input = create_tensor_data(
          np.float32, filter_shape, min_value=-10, max_value=10)
      input_tensors = [input_tensor]
      # NCHW goes through the XLA-compiled helper (see add_conv above);
      # NHWC uses plain tf.nn.conv2d.
      if parameters["data_format"] == "NCHW":
        out = add_conv(input_tensor, filter_input, parameters)
      else:
        out = tf.nn.conv2d(
            input_tensor,
            filter_input,
            strides=parameters["strides"],
            dilations=parameters["dilations"],
            padding="VALID",
            data_format=parameters["data_format"])
      out = add_bias_add(out, filter_shape)
      out = activation_op(out)
      # Add another conv + bias_add + activation.
      # Create constant filter for the second conv2d.
      filter_input_2 = create_tensor_data(
          np.float32, parameters["filter_2_shape"], min_value=-10, max_value=10)
      if parameters["data_format"] == "NCHW":
        out = add_conv(out, filter_input_2, parameters)
      else:
        out = tf.nn.conv2d(
            out,
            filter_input_2,
            strides=parameters["strides"],
            dilations=parameters["dilations"],
            padding="VALID",
            data_format=parameters["data_format"])
      out = add_bias_add(out, filter_shape)
      out = activation_op(out)
      return input_tensors, [out]

    def build_inputs(parameters, sess, inputs, outputs):
      """Build inputs for conv with activation."""
      input_shape, _ = get_tensor_shapes(parameters)
      values = [
          create_tensor_data(
              np.float32, input_shape, min_value=-1, max_value=1)
      ]
      return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

    make_zip_of_tests(
        options,
        test_parameters,
        build_graph,
        build_inputs,
        expected_tf_failures=2)

  return create_test
@register_make_test_function()
def make_conv_bias_relu6_tests(options):
  """Make a set of tests to do conv_bias_relu6."""
  # Specialize the generic conv+bias+activation factory with relu6.
  create_test = make_conv_bias_activation_tests(tf.nn.relu6)
  return create_test(options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/conv_bias_activation.py | Python | apache-2.0 | 5,338 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for cumsum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_cumsum_tests(options):
  """Make a set of tests to do cumsum."""
  test_parameters = [{
      "shape": [(3, 6), (8, 9, 7)],
      "dtype": [tf.int32, tf.int64, tf.float32],
      "axis": [0, 1],
      "exclusive": [True, False],
      "reverse": [True, False],
  }]

  def build_graph(parameters):
    """Build a graph with a single cumsum op."""
    source = tf.compat.v1.placeholder(
        dtype=parameters["dtype"], shape=parameters["shape"])
    summed = tf.math.cumsum(
        source,
        parameters["axis"],
        exclusive=parameters["exclusive"],
        reverse=parameters["reverse"])
    return [source], [summed]

  def build_inputs(parameters, sess, inputs, outputs):
    feed = create_tensor_data(parameters["dtype"], parameters["shape"])
    return [feed], sess.run(outputs, feed_dict=dict(zip(inputs, [feed])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/cumsum.py | Python | apache-2.0 | 2,030 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for dense_image_warp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
# Placeholder for internal API
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_dense_image_warp_tests(options):
  """Make a set of tests to do dense_image_warp."""
  test_parameters = [{
      'input_size': [[2, 4, 4, 1], [2, 4, 3, 3], [3, 7, 9, 2]],
      'flow_size': [[2, 4, 4, 2], [2, 4, 3, 2], [3, 7, 9, 2]],
  }]

  def build_graph(parameters):
    """Graph that warps an image tensor by a dense flow field."""
    image = tf.compat.v1.placeholder(
        dtype=tf.float32, name='input', shape=parameters['input_size'])
    flow = tf.compat.v1.placeholder(
        dtype=tf.float32, name='flow', shape=parameters['flow_size'])
    warped = dense_image_warp_annotated(image, flow)
    return [image, flow], [warped]

  def build_inputs(parameters, sess, inputs, outputs):
    """Random image and flow values in [-10, 10]."""
    feeds = [
        create_tensor_data(
            tf.float32, parameters['input_size'], min_value=-10, max_value=10),
        create_tensor_data(
            tf.float32, parameters['flow_size'], min_value=-10, max_value=10),
    ]
    return feeds, sess.run(outputs, feed_dict=dict(zip(inputs, feeds)))

  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  options.expected_ops_in_converted_model = ['DenseImageWarp']
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      extra_toco_options,
      expected_tf_failures=6)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/dense_image_warp.py | Python | apache-2.0 | 2,556 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for dynamic_rnn."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import rnn
@register_make_test_function("make_dynamic_rnn_tests")
@test_util.enable_control_flow_v2
def make_dynamic_rnn_tests(options):
  """Make a set of tests to do basic Lstm cell."""
  test_parameters = [
      {
          "dtype": [tf.float32],
          "num_batches": [4, 2],
          "time_step_size": [4, 3],
          "input_vec_size": [3, 2],
          "num_cells": [4, 2],
      },
  ]

  def _input_shape(parameters):
    """(batch, time, features) shape derived from the parameter dict."""
    return (parameters["num_batches"], parameters["time_step_size"],
            parameters["input_vec_size"])

  def build_graph(parameters):
    """Build a simple graph with BasicLSTMCell."""
    sequence = tf.placeholder(
        dtype=parameters["dtype"], shape=_input_shape(parameters))
    cell = tf.nn.rnn_cell.BasicLSTMCell(
        parameters["num_cells"], activation=tf.nn.relu)
    rnn_out, _ = rnn.dynamic_rnn(cell, sequence, dtype=parameters["dtype"])
    return [sequence], [rnn_out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""
    sess.run(tf.global_variables_initializer())
    feed = create_tensor_data(parameters["dtype"], _input_shape(parameters))
    return [feed], sess.run(outputs, feed_dict=dict(zip(inputs, [feed])))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/dynamic_rnn.py | Python | apache-2.0 | 2,891 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for einsum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util
@register_make_test_function("make_einsum_tests")
@test_util.enable_control_flow_v2
def make_einsum_tests(options):
  """Make a set of tests to do basic einsum ops."""
  # Each "shapes" entry is (lhs_shape, rhs_shape, einsum equation).
  test_parameters = [
      {
          "dtype": [tf.float32],
          "shapes": [((3, 4, 5), (3, 5, 6), "ijk,ikm->ijm"),
                     ((3, 4, 5), (5, 6), "ijk,km->ijm"),
                     ((2, 5, 7), (5, 2), "LBH,BL->BH"),
                     ((2, 5, 7), (5, 3, 2), "LBH,BKL->BKH"),
                     ((2, 5, 7, 3), (2, 4, 7, 3), "BFNH,BTNH->BNFT"),
                     ((2, 5, 7, 3), (7, 3, 4), "BFND,NDH->BFH"),
                     ((3, 4, 5), (5, 6, 2), "BFD,DNH->BFNH"),
                     ((7, 11, 13), (7, 11, 13, 5), "BIN,BINJ->BIJ"),
                     ((7, 11, 19), (7, 11, 13, 19), "BIJ,BINJ->BIN"),
                     ((5, 13, 3, 11), (5, 11, 13, 8), "ACBE,AECD->ABCD"),
                     ((5, 11, 7, 3), (5, 8, 7, 3), "AECD,ABCD->ACBE")],
      },
  ]

  def build_graph(parameters):
    """Build a simple graph with einsum Op."""
    lhs_shape, rhs_shape, equation = parameters["shapes"]
    lhs = tf.compat.v1.placeholder(dtype=parameters["dtype"], shape=lhs_shape)
    rhs = tf.compat.v1.placeholder(dtype=parameters["dtype"], shape=rhs_shape)
    return [lhs, rhs], [tf.einsum(equation, lhs, rhs)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed inputs, assign variables, and freeze graph."""
    lhs_shape, rhs_shape, _ = parameters["shapes"]
    values = [
        create_tensor_data(parameters["dtype"], lhs_shape),
        create_tensor_data(parameters["dtype"], rhs_shape),
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  options.use_experimental_converter = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      use_frozen_graph=True)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/einsum.py | Python | apache-2.0 | 3,282 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for identifying dilated conv."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_identify_dilated_conv_tests(options):
  """Make a set of tests to do dilated convolution."""
  test_parameters = [
      {
          "input_shape": [[1, 3, 4, 3], [4, 6, 6, 1]],
          "filter_shape": [[1, 1], [2, 3], [3, 3]],
          "strides": [[1, 1, 1, 1], [1, 2, 3, 1]],
          "dilations": [[1, 3, 2, 1], [1, 2, 2, 1], [1, 2, 1, 1]],
          "padding": ["VALID", "SAME"],
          "data_format": ["NHWC"],
          "constant_filter": [True, False],
          "channel_multiplier": [1, 2],
      },
  ]

  def get_tensor_shapes(parameters):
    # Filter layout: [H, W, in_channels, channel_multiplier].
    in_shape = parameters["input_shape"]
    kernel_shape = parameters["filter_shape"] + [
        in_shape[3], parameters["channel_multiplier"]
    ]
    return [in_shape, kernel_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    in_shape, kernel_shape = get_tensor_shapes(parameters)
    inp = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=in_shape)
    # The filter is either baked in as a constant or fed as a placeholder;
    # only placeholders are returned as graph inputs.
    if parameters["constant_filter"]:
      kernel = create_tensor_data(
          np.float32, kernel_shape, min_value=-10, max_value=10)
      placeholders = [inp]
    else:
      kernel = tf.compat.v1.placeholder(
          dtype=tf.float32, name="filter", shape=kernel_shape)
      placeholders = [inp, kernel]
    # `tf.nn.convolution` lowers dilation to the `space_to_batch` /
    # `batch_to_space` pair that the dilated-conv identification pass matches.
    out = tf.nn.convolution(
        inp,
        kernel,
        strides=parameters["strides"],
        dilations=parameters["dilations"],
        padding=parameters["padding"],
        data_format=parameters["data_format"])
    return placeholders, [out]

  def build_inputs(parameters, sess, inputs, outputs):
    # Feed 1 tensor (input) or 2 (input, filter) depending on whether the
    # filter was constant-folded into the graph.
    in_shape, kernel_shape = get_tensor_shapes(parameters)
    values = [
        create_tensor_data(np.float32, in_shape, min_value=-1, max_value=1)
    ]
    if not parameters["constant_filter"]:
      values.append(create_tensor_data(np.float32, kernel_shape))
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=168)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/identify_dilated_conv.py | Python | apache-2.0 | 3,777 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for identifying dilated Conv1D."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_identify_dilated_conv1d_tests(options):
  """Make a set of tests to do 1D dilated convolution."""
  test_parameters = [
      {
          "input_shape": [[1, 3, 3], [4, 6, 1]],
          "filter_size": [1, 2, 3],
          "stride": [1, 2],
          "dilations": [1, 2, 3],
          "padding": ["VALID", "SAME"],
          "num_filters": [1, 2],
      },
  ]

  def get_tensor_shapes(parameters):
    # 1-D filter layout: [width, in_channels, out_channels].
    in_shape = parameters["input_shape"]
    kernel_shape = [
        parameters["filter_size"], in_shape[2], parameters["num_filters"]
    ]
    return [in_shape, kernel_shape]

  def build_graph(parameters):
    """Build a conv graph given `parameters`."""
    in_shape, kernel_shape = get_tensor_shapes(parameters)
    kernel = tf.compat.v1.placeholder(
        dtype=tf.float32, name="filter", shape=kernel_shape)
    inp = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=in_shape)
    out = tf.nn.conv1d(
        inp,
        kernel,
        stride=parameters["stride"],
        dilations=parameters["dilations"],
        padding=parameters["padding"])
    return [inp, kernel], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    in_shape, kernel_shape = get_tensor_shapes(parameters)
    values = [
        create_tensor_data(np.float32, in_shape, min_value=-1, max_value=1),
        create_tensor_data(np.float32, kernel_shape),
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=16)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/identify_dilated_conv1d.py | Python | apache-2.0 | 2,865 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for imag op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_imag_tests(options):
  """Make a set of tests to do imag op."""
  # Parameter grid: complex dtypes over a spread of ranks (scalar to rank-6).
  test_parameters = [{
      "dtype": [tf.complex64, tf.complex128],
      "input_shape": [[], [1], [2, 3], [1, 3, 4, 3], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    inp = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.math.imag(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(
        parameters["dtype"].as_numpy_dtype,
        parameters["input_shape"],
        min_value=-10,
        max_value=10)
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/imag.py | Python | apache-2.0 | 2,005 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for irfft2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_irfft2d_tests(options):
  """Make a set of tests to do irfft2d."""
  test_parameters = [{
      "input_dtype": [tf.complex64],
      "input_shape": [[4, 3]],
      "fft_length": [[4, 4], [2, 2], [2, 4]]
  }, {
      "input_dtype": [tf.complex64],
      "input_shape": [[3, 8, 5]],
      "fft_length": [[2, 4], [2, 8], [8, 8]]
  }, {
      "input_dtype": [tf.complex64],
      "input_shape": [[3, 1, 9]],
      "fft_length": [[1, 8], [1, 16]]
  }]

  def build_graph(parameters):
    inp = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.signal.irfft2d(inp, fft_length=parameters["fft_length"])]

  def build_inputs(parameters, sess, inputs, outputs):
    # irfft2d expects rfft2d-style spectra, so synthesize the input by running
    # np.fft.rfft2 over random real data whose time-domain size matches the
    # complex input shape: last dim of a half-spectrum is time_len // 2 + 1.
    shape = parameters["input_shape"]
    fft_shape = [shape[-2], (shape[-1] - 1) * 2]
    spectrum = np.fft.rfft2(create_tensor_data(np.float32, shape), fft_shape)
    return [spectrum], sess.run(
        outputs, feed_dict=dict(zip(inputs, [spectrum])))

  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/irfft2d.py | Python | apache-2.0 | 2,578 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for is_finite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_is_finite_tests(options):
  """Make a set of tests to do is_finite."""
  test_parameters = [
      {
          "input_shape": [[100], [3, 15, 14, 3]],
      },
  ]

  def build_graph(parameters):
    """Build the graph for the test case."""
    input_tensor = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    out = tf.math.is_finite(input_tensor)
    return [input_tensor], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    """Build the inputs for the test case."""
    input_values = create_tensor_data(
        np.float32, parameters["input_shape"], min_value=-10, max_value=10)

    # Inject NaN and Inf values at random positions so the op actually has
    # non-finite inputs to classify.
    def random_index(shape):
      result = []
      for dim in shape:
        result.append(np.random.randint(low=0, high=dim))
      return tuple(result)

    # Use the lowercase constants: the `np.Inf` / `np.NAN` aliases were
    # deprecated and removed in NumPy 2.0; `np.inf` / `np.nan` are identical
    # values and work on all NumPy versions.
    input_values[random_index(input_values.shape)] = np.inf
    input_values[random_index(input_values.shape)] = -np.inf
    input_values[random_index(input_values.shape)] = np.nan
    # Extreme-but-finite values must still be reported as finite.
    input_values[random_index(input_values.shape)] = tf.float32.max
    input_values[random_index(input_values.shape)] = tf.float32.min
    return [input_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [input_values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/is_finite.py | Python | apache-2.0 | 2,480 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for max_pool_with_argmax."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_max_pool_with_argmax_tests(options):
  """Make a set of tests to do max_pool_with_argmax."""
  test_parameters = [{
      'input_size': [[2, 4, 2, 2], [2, 4, 3, 2]],
      'pool_size': [(2, 2), (2, 1)],
      'strides': [(2, 2)],
      'padding': ['SAME', 'VALID'],
  }, {
      'input_size': [[2, 4, 10, 2], [2, 4, 11, 2], [2, 4, 12, 2]],
      'pool_size': [(2, 2)],
      'strides': [(2, 3)],
      'padding': ['SAME', 'VALID'],
  }]

  def build_graph(parameters):
    """Build the max_pool_with_argmax op testing graph."""
    inp = tf.compat.v1.placeholder(
        dtype=tf.float32, name='input', shape=parameters['input_size'])
    # The op has two outputs: the pooled values and int32 argmax indices.
    pooled, argmax = tf.nn.max_pool_with_argmax(
        inp,
        ksize=parameters['pool_size'],
        strides=parameters['strides'],
        padding=parameters['padding'],
        output_dtype=tf.dtypes.int32)
    return [inp], [pooled, argmax]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(
        tf.float32, parameters['input_size'], min_value=-10, max_value=10)
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/max_pool_with_argmax.py | Python | apache-2.0 | 2,559 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for parse example."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import string
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def create_example_data(feature_dtype, feature_shape):
  """Create structured example data."""
  # Build a single tf.train.Example whose feature "x" matches the requested
  # dtype family: floats -> FloatList, ints -> Int64List, strings -> BytesList.
  features = {}
  if feature_dtype in (tf.float32, tf.float16, tf.float64):
    values = np.random.rand(*feature_shape)
    features["x"] = tf.train.Feature(
        float_list=tf.train.FloatList(value=list(values)))
  elif feature_dtype in (tf.int32, tf.uint8, tf.int64, tf.int16):
    values = np.random.randint(-100, 100, size=feature_shape)
    features["x"] = tf.train.Feature(
        int64_list=tf.train.Int64List(value=list(values)))
  elif feature_dtype == tf.string:
    letters = list(string.ascii_uppercase)
    text = "".join(np.random.choice(letters, size=10)).encode("utf-8")
    features["x"] = tf.train.Feature(
        bytes_list=tf.train.BytesList(value=[text] * feature_shape[0]))
  example = tf.train.Example(features=tf.train.Features(feature=features))
  return np.array([example.SerializeToString()])
@register_make_test_function("make_parse_example_tests")
def make_parse_example_tests(options):
  """Make a set of tests to use parse_example."""
  test_parameters = [{
      "feature_dtype": [tf.string, tf.float32, tf.int64],
      "is_dense": [True, False],
      "feature_shape": [[1], [2], [16]],
  }]

  def build_graph(parameters):
    """Build the graph for parse_example tests."""
    dtype = parameters["feature_dtype"]
    shape = parameters["feature_shape"]
    serialized = tf.compat.v1.placeholder(
        dtype=tf.string, name="input", shape=[1])
    if parameters["is_dense"]:
      # Dense features need a default value of the matching dtype/shape.
      default = np.zeros(shape=shape)
      if dtype == tf.string:
        default = np.array(["missing"] * shape[0])
      spec = {
          "x":
              tf.FixedLenFeature(
                  shape=shape, dtype=dtype, default_value=default)
      }
    else:  # Sparse
      spec = {"x": tf.VarLenFeature(dtype=dtype)}
    parsed = tf.parse_example(serialized, spec)
    if parameters["is_dense"]:
      out = parsed["x"]
    else:
      # Sparse parse produces a SparseTensor; expose just its values.
      out = parsed["x"].values
    return [serialized], [out]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_example_data(parameters["feature_dtype"],
                            parameters["feature_shape"])
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  extra_toco_options = ExtraTocoOptions()
  extra_toco_options.allow_custom_ops = True
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/parse_example.py | Python | apache-2.0 | 3,856 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for real op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_real_tests(options):
  """Make a set of tests to do real op."""
  # Parameter grid: complex dtypes over a spread of ranks (scalar to rank-6).
  test_parameters = [{
      "dtype": [tf.complex64, tf.complex128],
      "input_shape": [[], [1], [2, 3], [1, 3, 4, 3], [2, 2, 3, 4, 5, 6]],
  }]

  def build_graph(parameters):
    inp = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.math.real(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(
        parameters["dtype"].as_numpy_dtype,
        parameters["input_shape"],
        min_value=-10,
        max_value=10)
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/real.py | Python | apache-2.0 | 2,005 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for reciprocal."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function("make_reciprocal_tests")
def make_reciprocal_tests(options):
  """Make a set of tests to do reciprocal."""
  test_parameters = [{
      "input_dtype": [tf.float32, tf.int32, tf.int64],
      "input_shape": [[1, 2], [1, 2, 3, 4], [10]],
  }]

  def build_graph(parameters):
    """Build the graph for cond tests."""
    inp = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.math.reciprocal(inp)]

  def build_inputs(parameters, sess, inputs, outputs):
    values = [
        create_tensor_data(parameters["input_dtype"], parameters["input_shape"])
    ]
    return values, sess.run(outputs, feed_dict=dict(zip(inputs, values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=6)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/reciprocal.py | Python | apache-2.0 | 2,077 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for rfft2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_rfft_tests(options):
  """Make a set of tests to do rfft."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[8], [8, 8], [3, 8, 8], [3, 8]],
      "fft_length": [None, [4], [8], [16]]
  }]

  def build_graph(parameters):
    inp = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.signal.rfft(inp, fft_length=parameters["fft_length"])]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  extra_toco_options = ExtraTocoOptions()
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/rfft.py | Python | apache-2.0 | 2,141 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for rfft2d."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_rfft2d_tests(options):
  """Make a set of tests to do rfft2d."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[8, 8], [3, 8, 8], [3, 1, 16]],
      "fft_length": [
          None, [4, 4], [4, 8], [8, 4], [8, 8], [8, 16], [16, 8], [16, 16],
          [1, 8], [1, 16]
      ]
  }]

  def build_graph(parameters):
    inp = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    return [inp], [tf.signal.rfft2d(inp, fft_length=parameters["fft_length"])]

  def build_inputs(parameters, sess, inputs, outputs):
    data = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  extra_toco_options = ExtraTocoOptions()
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/rfft2d.py | Python | apache-2.0 | 2,235 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for segment_sum."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_segment_sum_tests(options):
  """Generate zip test cases for the segment_sum op."""
  test_parameters = [
      {
          "data_shape": [[4, 4], [4], [4, 3, 2]],
          "data_dtype": [tf.float32, tf.int32],
          "segment_ids": [[0, 0, 1, 1], [0, 1, 2, 2], [0, 1, 2, 3],
                          [0, 0, 0, 0]],
      },
  ]

  def build_graph(parameters):
    """Compute segment_sum over a placeholder with constant segment ids."""
    data_tensor = tf.compat.v1.placeholder(
        dtype=parameters["data_dtype"],
        name="data",
        shape=parameters["data_shape"])
    ids_tensor = tf.constant(parameters["segment_ids"], dtype=tf.int32)
    result = tf.segment_sum(data_tensor, ids_tensor)
    return [data_tensor], [result]

  def build_inputs(parameters, sess, inputs, outputs):
    """Run the graph on random data."""
    data_values = create_tensor_data(parameters["data_dtype"],
                                     parameters["data_shape"])
    return [data_values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [data_values])))

  options.use_experimental_converter = True
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=0)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/segment_sum.py | Python | apache-2.0 | 2,232 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for strided_slice operators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
def _make_shape_to_strided_slice_test(options,
                                      test_parameters,
                                      expected_tf_failures=0):
  """Helper that builds zip tests for strided_slice applied to tf.shape."""

  def build_graph(parameters):
    """Construct strided_slice(shape(input)) with the configured masks."""
    dynamic_input = tf.compat.v1.placeholder(
        dtype=parameters["dtype"],
        name="input",
        shape=parameters["dynamic_input_shape"])
    sliced_shape = tf.strided_slice(
        tf.shape(dynamic_input),
        parameters["begin"],
        parameters["end"],
        parameters["strides"],
        begin_mask=parameters["begin_mask"],
        end_mask=parameters["end_mask"])
    return [dynamic_input], [sliced_shape]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data in [-1, 1] through the graph."""
    feed_values = [
        create_tensor_data(
            parameters["dtype"],
            parameters["input_shape"],
            min_value=-1,
            max_value=1)
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=expected_tf_failures)
@register_make_test_function()
def make_shape_to_strided_slice_tests(options):
  """Make a set of tests to do shape op into strided_slice."""
  # Both parameter sets share the same graph configuration; they differ
  # only in which quantization modes are exercised.
  common = {
      "dtype": [tf.float32],
      "dynamic_input_shape": [[None, 2, 2, 5]],
      "input_shape": [[12, 2, 2, 5]],
      "strides": [[1]],
      "begin": [[0]],
      "end": [[1]],
      "begin_mask": [0],
      "end_mask": [0],
  }
  test_parameters = [
      # Test dynamic shape into strided slice quantization works.
      dict(
          common, fully_quantize=[False, True],
          dynamic_range_quantize=[False]),
      dict(common, fully_quantize=[False], dynamic_range_quantize=[True]),
  ]
  _make_shape_to_strided_slice_test(
      options, test_parameters, expected_tf_failures=0)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/shape_to_strided_slice.py | Python | apache-2.0 | 3,459 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for exp."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_softplus_tests(options):
  """Generate zip test cases for the softplus op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[], [3], [1, 100], [4, 2, 3], [5, 224, 224, 3]],
  }]

  def build_graph(parameters):
    """Apply tf.math.softplus to a placeholder input."""
    softplus_input = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    softplus_output = tf.math.softplus(softplus_input)
    return [softplus_input], [softplus_output]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data in [-100, 9] through the graph."""
    feed_values = [
        create_tensor_data(
            parameters["input_dtype"],
            parameters["input_shape"],
            min_value=-100,
            max_value=9)
    ]
    return feed_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, feed_values)))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/softplus.py | Python | apache-2.0 | 2,007 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for static hashtable."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function("make_static_hashtable_tests")
def make_static_hashtable_tests(options):
"""Make a set of tests to use static hashtable."""
# Chose a set of parameters
test_parameters = [{
"table": [(tf.string, tf.int64, ["1", "2", "3"], [4, 5, 6], -1),
(tf.int64, tf.string, [1, 2, 3], ["4", "5", "6"], "-1")],
"input_shape": [[], [3], [1], [10]],
}]
def build_graph(parameters):
"""Build the graph for static hashtable tests."""
(key_dtype, value_dtype, keys, values, default_value) = parameters["table"]
key_tensor = tf.constant(keys, dtype=key_dtype)
value_tensor = tf.constant(values, dtype=value_dtype)
initializer = tf.lookup.KeyValueTensorInitializer(key_tensor, value_tensor)
table = tf.lookup.StaticHashTable(initializer, default_value)
with tf.control_dependencies([tf.initializers.tables_initializer()]):
input_value = tf.compat.v1.placeholder(
dtype=key_dtype, name="input", shape=parameters["input_shape"])
out = table.lookup(key_tensor)
return [input_value], [out]
def build_inputs(parameters, sess, inputs, outputs):
(key_dtype, _, _, _, _) = parameters["table"]
input_values = [create_tensor_data(key_dtype, parameters["input_shape"])]
return input_values, sess.run(
outputs, feed_dict=dict(zip(inputs, input_values)))
extra_toco_options = ExtraTocoOptions()
make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
extra_toco_options)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/static_hashtable.py | Python | apache-2.0 | 2,687 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for static_rnn_with_control_flow_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util
from tensorflow.python.ops import rnn
@register_make_test_function("make_static_rnn_with_control_flow_v2_tests")
@test_util.enable_control_flow_v2
def make_static_rnn_with_control_flow_v2_tests(options):
"""Make a set of tests to do basic Lstm cell."""
test_parameters = [
{
"dtype": [tf.float32],
"num_batches": [4],
"time_step_size": [4],
"input_vec_size": [3],
"num_cells": [4],
"use_sequence_length": [True, False],
},
]
def build_graph(parameters):
"""Build a simple graph with BasicLSTMCell."""
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
num_cells = parameters["num_cells"]
inputs_after_split = []
for i in range(time_step_size):
one_timestamp_input = tf.placeholder(
dtype=parameters["dtype"],
name="split_{}".format(i),
shape=[num_batches, input_vec_size])
inputs_after_split.append(one_timestamp_input)
lstm_cell = tf.nn.rnn_cell.BasicLSTMCell(
num_cells, activation=tf.nn.relu, state_is_tuple=True)
sequence_length = None
if parameters["use_sequence_length"]:
# Using different sequence length in each bach, like [1, 2, 3, 3...].
sequence_length = [
min(i + 1, time_step_size) for i in range(num_batches)
]
cell_outputs, _ = rnn.static_rnn(
lstm_cell,
inputs_after_split,
dtype=tf.float32,
sequence_length=sequence_length)
out = cell_outputs[-1]
return inputs_after_split, [out]
def build_inputs(parameters, sess, inputs, outputs):
"""Feed inputs, assign variables, and freeze graph."""
with tf.variable_scope("", reuse=True):
kernel = tf.get_variable("rnn/basic_lstm_cell/kernel")
bias = tf.get_variable("rnn/basic_lstm_cell/bias")
kernel_values = create_tensor_data(parameters["dtype"],
[kernel.shape[0], kernel.shape[1]], -1,
1)
bias_values = create_tensor_data(parameters["dtype"], [bias.shape[0]], 0,
1)
sess.run(tf.group(kernel.assign(kernel_values), bias.assign(bias_values)))
num_batches = parameters["num_batches"]
time_step_size = parameters["time_step_size"]
input_vec_size = parameters["input_vec_size"]
input_values = []
for _ in range(time_step_size):
tensor_data = create_tensor_data(parameters["dtype"],
[num_batches, input_vec_size], 0, 1)
input_values.append(tensor_data)
out = sess.run(outputs, feed_dict=dict(zip(inputs, input_values)))
return input_values, out
make_zip_of_tests(
options,
test_parameters,
build_graph,
build_inputs,
use_frozen_graph=True)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/static_rnn_with_control_flow_v2.py | Python | apache-2.0 | 4,057 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for stft."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import ExtraTocoOptions
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_stft_tests(options):
  """Generate zip test cases for the tf.signal.stft op."""
  test_parameters = [{
      "input_dtype": [tf.float32],
      "input_shape": [[8], [8, 16], [3, 1, 4]],
      "frame_length": [4, 8],
      "frame_step": [1, 2, 4],
      "fft_length": [None, 2, 4, 8],
  }]

  def build_graph(parameters):
    """Apply tf.signal.stft to a placeholder signal."""
    signal = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input",
        shape=parameters["input_shape"])
    spectrogram = tf.signal.stft(
        signal,
        frame_length=parameters["frame_length"],
        frame_step=parameters["frame_step"],
        fft_length=parameters["fft_length"])
    return [signal], [spectrogram]

  def build_inputs(parameters, sess, inputs, outputs):
    """Run the graph on random signal data."""
    data = create_tensor_data(parameters["input_dtype"],
                              parameters["input_shape"])
    return [data], sess.run(outputs, feed_dict=dict(zip(inputs, [data])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs,
                    ExtraTocoOptions())
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/stft.py | Python | apache-2.0 | 2,300 |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_concat."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_concat_tests(options):
  """Generate zip test cases for the TensorListConcatV2 op."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[5], [3, 3]],
      },
  ]

  def build_graph(parameters):
    """Convert a tensor to a tensor list and concat it back together."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = tf.placeholder(
        dtype=parameters["element_dtype"], shape=stacked_shape)
    as_list = list_ops.tensor_list_from_tensor(source,
                                               parameters["element_shape"])
    concatenated = list_ops.tensor_list_concat(
        as_list, parameters["element_dtype"], parameters["element_shape"])
    return [source], [concatenated]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data with the full stacked shape."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = create_tensor_data(parameters["element_dtype"], stacked_shape)
    return [source], sess.run(outputs, feed_dict=dict(zip(inputs, [source])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_concat.py | Python | apache-2.0 | 2,367 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_dynamic_shape."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_dynamic_shape_tests(options):
  """Generate test cases for tensor lists with a dynamic element shape."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[], [5], [3, 3]],
      },
  ]

  def build_graph(parameters):
    """Fill a shape-unspecified tensor list in a while loop, then stack it."""
    num_elements = parameters["num_elements"]
    element_dtype = parameters["element_dtype"]
    item = tf.placeholder(
        dtype=element_dtype, shape=parameters["element_shape"])
    # element_shape=None forces shape inference to handle the dynamic case.
    reserved = list_ops.tensor_list_reserve(
        element_shape=None,
        num_elements=num_elements,
        element_dtype=element_dtype)

    def keep_going(index, _):
      return index < num_elements

    def write_item(index, current_list):
      updated_item = tf.add(
          tf.add(item, item), tf.constant(value=1, dtype=element_dtype))
      return index + 1, list_ops.tensor_list_set_item(current_list, index,
                                                      updated_item)

    _, filled_list = tf.while_loop(keep_going, write_item, (0, reserved))
    stacked = list_ops.tensor_list_stack(
        filled_list, num_elements=num_elements, element_dtype=element_dtype)
    return [item], [stacked]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed one random element through the graph."""
    item = create_tensor_data(parameters["element_dtype"],
                              parameters["element_shape"])
    return [item], sess.run(outputs, feed_dict=dict(zip(inputs, [item])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_dynamic_shape.py | Python | apache-2.0 | 2,770 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_get_item."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_get_item_tests(options):
  """Generate zip test cases for the TensorListGetItem op."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[], [5], [3, 3]],
          "index": [0, 1, 2, 3],
      },
  ]

  def build_graph(parameters):
    """Convert a tensor to a tensor list and read one element back."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = tf.placeholder(
        dtype=parameters["element_dtype"], shape=stacked_shape)
    as_list = list_ops.tensor_list_from_tensor(source,
                                               parameters["element_shape"])
    element = list_ops.tensor_list_get_item(as_list, parameters["index"],
                                            parameters["element_dtype"])
    return [source], [element]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data with the full stacked shape."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = create_tensor_data(parameters["element_dtype"], stacked_shape)
    return [source], sess.run(outputs, feed_dict=dict(zip(inputs, [source])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_get_item.py | Python | apache-2.0 | 2,402 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_length."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_length_tests(options):
  """Generate zip test cases for the TensorListLength op."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[], [5], [3, 3]],
      },
  ]

  def build_graph(parameters):
    """Convert a tensor to a tensor list and query its length."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = tf.placeholder(
        dtype=parameters["element_dtype"], shape=stacked_shape)
    as_list = list_ops.tensor_list_from_tensor(source,
                                               parameters["element_shape"])
    length = list_ops.tensor_list_length(as_list)
    return [source], [length]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data with the full stacked shape."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = create_tensor_data(parameters["element_dtype"], stacked_shape)
    return [source], sess.run(outputs, feed_dict=dict(zip(inputs, [source])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_length.py | Python | apache-2.0 | 2,271 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_resize."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_resize_tests(options):
  """Generate zip test cases for the TensorListResize op."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[], [5], [3, 3]],
          "new_size": [1, 3, 5, 7],
      },
  ]

  def build_graph(parameters):
    """Resize a tensor list built from a tensor, then stack the result."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = tf.placeholder(
        dtype=parameters["element_dtype"], shape=stacked_shape)
    as_list = list_ops.tensor_list_from_tensor(source,
                                               parameters["element_shape"])
    resized = list_ops.tensor_list_resize(as_list, parameters["new_size"])
    stacked = list_ops.tensor_list_stack(
        resized, element_dtype=parameters["element_dtype"])
    return [source], [stacked]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data with the full stacked shape."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = create_tensor_data(parameters["element_dtype"], stacked_shape)
    return [source], sess.run(outputs, feed_dict=dict(zip(inputs, [source])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_resize.py | Python | apache-2.0 | 2,487 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tensor_list_set_item."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.ops import list_ops
@register_make_test_function()
def make_tensor_list_set_item_tests(options):
  """Generate zip test cases for the TensorListSetItem op."""
  test_parameters = [
      {
          "element_dtype": [tf.float32, tf.int32],
          "num_elements": [4, 5, 6],
          "element_shape": [[], [5], [3, 3]],
          "index": [0, 1, 2, 3],
      },
  ]

  def build_graph(parameters):
    """Replace one list element with a fed item, then stack the list."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = tf.placeholder(
        dtype=parameters["element_dtype"], shape=stacked_shape)
    item = tf.placeholder(
        dtype=parameters["element_dtype"], shape=parameters["element_shape"])
    as_list = list_ops.tensor_list_from_tensor(source,
                                               parameters["element_shape"])
    updated = list_ops.tensor_list_set_item(as_list, parameters["index"],
                                            item)
    stacked = list_ops.tensor_list_stack(
        updated,
        num_elements=parameters["num_elements"],
        element_dtype=parameters["element_dtype"])
    return [source, item], [stacked]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data for both the source tensor and the new item."""
    stacked_shape = [parameters["num_elements"]] + parameters["element_shape"]
    source = create_tensor_data(parameters["element_dtype"], stacked_shape)
    item = create_tensor_data(parameters["element_dtype"],
                              parameters["element_shape"])
    return [source, item], sess.run(
        outputs, feed_dict=dict(zip(inputs, [source, item])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/tensor_list_set_item.py | Python | apache-2.0 | 2,804 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for where_v2."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_where_v2_tests(options):
  """Make a set of tests to do where_v2."""
  # Each entry pairs condition shapes with an (x, y) shape set; several
  # entries exercise broadcasting between the condition, x and y tensors.
  test_parameters = [
      {
          "input_condition_shape": [[1, 2, 3, 4]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 3, 4], [1, 1, 1, 1]),],
      },
      {
          "input_condition_shape": [[2], [1]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([2, 1, 2, 1], [2, 1, 2, 1]),],
      },
      {
          "input_condition_shape": [[1, 4, 2]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 3, 4, 2], [1, 3, 4, 2]),],
      },
      {
          "input_condition_shape": [[1, 2]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 2], [1, 2, 2]),],
      },
      {
          "input_condition_shape": [[1, 1]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 1, 2, 2], [1, 1, 2, 2]),],
      },
      {
          "input_condition_shape": [[4]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([4, 4], [4, 4]),],
      },
      {
          "input_condition_shape": [[2]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([2, 3], [2, 3]),],
      },
      # x and y deliberately have different shapes here.
      {
          "input_condition_shape": [[1, 2]],
          "input_dtype": [tf.float32, tf.int32],
          "input_shape_set": [([1, 2, 2], [1, 2]),],
      },
  ]
  def build_graph(parameters):
    """Build the where op testing graph."""
    input_condition = tf.compat.v1.placeholder(
        dtype=tf.bool,
        name="input_condition",
        shape=parameters["input_condition_shape"])
    input_value1 = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input_x",
        shape=parameters["input_shape_set"][0])
    input_value2 = tf.compat.v1.placeholder(
        dtype=parameters["input_dtype"],
        name="input_y",
        shape=parameters["input_shape_set"][1])
    out = tf.where_v2(input_condition, input_value1, input_value2)
    return [input_condition, input_value1, input_value2], [out]
  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random condition/x/y tensors and capture the expected outputs."""
    input_condition = create_tensor_data(tf.bool,
                                         parameters["input_condition_shape"])
    input_value1 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_set"][0])
    input_value2 = create_tensor_data(parameters["input_dtype"],
                                      parameters["input_shape_set"][1])
    return [input_condition, input_value1, input_value2], sess.run(
        outputs,
        feed_dict=dict(
            zip(inputs, [input_condition, input_value1, input_value2])))
  # These tests are generated through the experimental (MLIR-based) converter.
  options.use_experimental_converter = True
  # Two of the parameter combinations are expected to fail on the TF side.
  make_zip_of_tests(
      options,
      test_parameters,
      build_graph,
      build_inputs,
      expected_tf_failures=2)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/where_v2.py | Python | apache-2.0 | 4,062 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for while_loop."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing import zip_test_utils
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
from tensorflow.python.framework import test_util
@register_make_test_function("make_while_tests")
@test_util.enable_control_flow_v2
def make_while_tests(options):
  """Make a set of tests to do while."""
  # Chose a set of parameters
  test_parameters = [{
      "num_iterations": range(20),
      "increment_value": [[1]],
      "dtype": [tf.int32],
  }, {
      "num_iterations": range(20),
      "increment_value": [["a"]],
      "dtype": [tf.string],
  }]
  def build_graph(parameters):
    """Build the graph for while tests."""
    # MLIR TFLite converter can't handle scalar inputs. This is a workaround
    # to input (1,) tensors and then reshape to scalar.
    # TODO(b/129003347): Remove the workaround after scalar inputs are
    # supported.
    num_iterations = tf.placeholder(
        dtype=tf.int32, name="num_iterations", shape=(1,))
    increment_value = tf.placeholder(
        dtype=parameters["dtype"], name="increment_value", shape=(1,))
    num_iterations_scalar = tf.reshape(num_iterations, ())
    # For integer inputs, this simple model calculates the i-th number of the
    # triangular sequence. For string inputs, the model returns the string
    # value, filled with the given increment value times the given
    # num_iterations.
    # The model also returns the counter variable and increment value in the
    # outputs. The counter and increment value are passed to the result to
    # make sure the necessary control dependency of the model is generated
    # for testing the dynamic tensor cases.
    def cond_fn(counter, value, increment_value):
      """Keeps looping while the counter is below num_iterations."""
      del value
      del increment_value
      return counter < num_iterations_scalar
    def body_fn(counter, value, increment_value):
      """Advances the counter and updates the accumulated value."""
      new_counter = counter + 1
      if parameters["dtype"] == tf.string:
        # Use fill op to create new string value with the given counter value.
        del value
        new_value = tf.fill([1], tf.reshape(increment_value, ()))
      else:
        new_value = value + increment_value
      return [new_counter, new_value, increment_value]
    # The counter starts at 1 and `value` is seeded with the increment value.
    counter, value, result_increment_value = tf.while_loop(
        cond_fn, body_fn, loop_vars=[1, increment_value, increment_value])
    return [num_iterations,
            increment_value], [counter, value, result_increment_value]
  def build_inputs(parameters, sess, inputs, outputs):
    """Creates concrete inputs and runs the session for expected outputs."""
    numpy_type = zip_test_utils.TF_TYPE_INFO[parameters["dtype"]][0]
    input_values = [
        np.array([parameters["num_iterations"]], dtype=np.int32),
        np.array(parameters["increment_value"], dtype=numpy_type)
    ]
    return input_values, sess.run(
        outputs, feed_dict=dict(zip(inputs, input_values)))
  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/mlir/testing/op_tests/while_loop.py | Python | apache-2.0 | 3,830 |
load("//tensorflow:tensorflow.bzl", "get_compatible_with_portable")
package(
default_visibility = ["//visibility:public"],
licenses = ["notice"],
)
cc_library(
name = "resource",
srcs = [
"resource_variable.cc",
"static_hashtable.cc",
],
hdrs = [
"lookup_interfaces.h",
"lookup_util.h",
"resource_base.h",
"resource_variable.h",
"static_hashtable.h",
],
compatible_with = get_compatible_with_portable(),
deps = [
"//tensorflow/lite:string_util",
"//tensorflow/lite/c:common",
"//tensorflow/lite/kernels/internal:compatibility",
"//tensorflow/lite/kernels/internal:tensor",
],
)
cc_test(
name = "resource_variable_test",
srcs = [
"resource_variable_test.cc",
],
deps = [
":resource",
"//tensorflow/lite:util",
"//tensorflow/lite/c:common",
"@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/BUILD | Starlark | apache-2.0 | 1,016 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_INTERFACES_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_INTERFACES_H_
#include <unordered_map>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/resource/lookup_util.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace resource {
/// WARNING: Experimental interface, subject to change.
// A resource hash table interface. It's similar to TensorFlow core's
// LookupInterface class. But it's identified with int32 ID in TFLite (instead
// of using Resource handle like TensorFlow).
class LookupInterface : public ResourceBase {
 public:
  // Looks up each element of `keys` in the table and writes the matching
  // value (or a default taken from `default_value` for absent keys) into
  // `values`.
  virtual TfLiteStatus Lookup(TfLiteContext* context, const TfLiteTensor* keys,
                              TfLiteTensor* values,
                              const TfLiteTensor* default_value) = 0;
  // Bulk-inserts the parallel `keys`/`values` tensors into the table.
  virtual TfLiteStatus Import(TfLiteContext* context, const TfLiteTensor* keys,
                              const TfLiteTensor* values) = 0;
  // Returns the number of entries currently stored in the table.
  virtual size_t Size() = 0;
  // Key/value dtypes the table was created with.
  virtual TfLiteType GetKeyType() const = 0;
  virtual TfLiteType GetValueType() const = 0;
  // Verifies that `keys` and `values` match the table's declared dtypes.
  virtual TfLiteStatus CheckKeyAndValueTypes(TfLiteContext* context,
                                             const TfLiteTensor* keys,
                                             const TfLiteTensor* values) = 0;
};
// Creates an resource hash table, shared among all the subgraphs with the
// given resource id if there is an existing one.
// WARNING: Experimental interface, subject to change.
void CreateHashtableResourceIfNotAvailable(ResourceMap* resources,
int resource_id,
TfLiteType key_dtype,
TfLiteType value_dtype);
// Returns the corresponding resource hash table, or nullptr if none.
// WARNING: Experimental interface, subject to change.
LookupInterface* GetHashtableResource(ResourceMap* resources, int resource_id);
} // namespace resource
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_INTERFACES_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/lookup_interfaces.h | C++ | apache-2.0 | 2,900 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_UTIL_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_UTIL_H_
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace resource {
namespace internal {
/// Helper class for reading scalar elements out of a TFLite tensor.
template <typename T>
class TensorReader {
 public:
  explicit TensorReader(const TfLiteTensor* input)
      : data_(GetTensorData<T>(input)) {}
  // Returns the element at `index`. Bounds are not checked for performance
  // reasons; the caller is responsible for passing a valid index.
  T GetData(int index) { return data_[index]; }

 private:
  const T* data_;
};
/// Helper class for accessing TFLite tensor data. This specialized class is for
/// std::string type.
template <>
class TensorReader<std::string> {
public:
explicit TensorReader(const TfLiteTensor* input) : input_(input) {}
// Returns the corresponding string data at the given index position.
// In here, it does not check the validity of the index should be guaranteed
// in order not to harm the performance. Caller should take care of it.
std::string GetData(int index) {
auto string_ref = GetString(input_, index);
return std::string(string_ref.str, string_ref.len);
}
private:
const TfLiteTensor* input_;
};
/// WARNING: Experimental interface, subject to change.
/// Helper class for writing scalar elements into a TFLite tensor.
template <typename ValueType>
class TensorWriter {
 public:
  explicit TensorWriter(TfLiteTensor* values)
      : output_data_(GetTensorData<ValueType>(values)) {}
  // Stores `value` at `index` in the tensor storage. Bounds are not checked
  // for performance reasons; the caller is responsible for passing a valid
  // index. The value is taken by const reference for consistency with the
  // std::string specialization and so const sources can be passed.
  void SetData(int index, const ValueType& value) {
    output_data_[index] = value;
  }
  // Commits pending updates. A no-op here because SetData writes directly
  // into the tensor's buffer.
  void Commit() {
    // Noop.
  }

 private:
  ValueType* output_data_;
};
/// WARNING: Experimental interface, subject to change.
/// Helper class for writing TFLite tensor data. This specialized class is for
/// std::string type: strings are staged in a DynamicBuffer and only flushed
/// to the destination tensor when Commit() is called.
template <>
class TensorWriter<std::string> {
 public:
  explicit TensorWriter(TfLiteTensor* values) : values_(values) {}
  // Queues the given string value to the buffer regardless of the provided
  // index: strings are appended sequentially, so callers must invoke SetData
  // in ascending index order. Bounds are not checked for performance reasons;
  // the caller is responsible for passing a valid index.
  void SetData(int index, const std::string& value) {
    buf_.AddString(value.data(), value.length());
  }
  // Commit updates. The stored data in DynamicBuffer will be written into the
  // tensor storage.
  void Commit() { buf_.WriteToTensor(values_, nullptr); }

 private:
  TfLiteTensor* values_;
  DynamicBuffer buf_;
};
} // namespace internal
} // namespace resource
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_LOOKUP_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/lookup_util.h | C++ | apache-2.0 | 3,867 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_BASE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_BASE_H_
#include <cstdint>
#include <memory>
#include <unordered_map>
namespace tflite {
namespace resource {
// ResourceBase is an abstract base class for resources (variables, hash
// tables, ...) stored in a ResourceMap and owned through base-class pointers.
/// WARNING: Experimental interface, subject to change.
class ResourceBase {
 public:
  // `= default` instead of user-provided empty bodies; `explicit` on a
  // default constructor was meaningless and has been dropped.
  ResourceBase() = default;
  // Virtual so derived resources are destroyed correctly when deleted
  // through a ResourceBase pointer held in the ResourceMap.
  virtual ~ResourceBase() = default;
  // Returns true once the resource has been populated and is safe to read.
  virtual bool IsInitialized() = 0;
};
/// WARNING: Experimental interface, subject to change.
using ResourceMap =
std::unordered_map<std::int32_t, std::unique_ptr<ResourceBase>>;
} // namespace resource
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_BASE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/resource_base.h | C++ | apache-2.0 | 1,443 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <cstdlib>
#include <cstring>
#include <map>
#include <memory>
namespace tflite {
namespace resource {
// Starts life zero-filled and uninitialized; GetTensor() returns nullptr
// until AssignFrom() is called.
ResourceVariable::ResourceVariable() { memset(&tensor_, 0, sizeof(tensor_)); }
// Move constructor: steals `other`'s tensor and leaves `other` in a valid
// empty state so its destructor releases nothing.
ResourceVariable::ResourceVariable(ResourceVariable&& other) {
  tensor_ = other.tensor_;
  is_initialized_ = other.is_initialized_;
  memset(&other.tensor_, 0, sizeof(other.tensor_));
  other.is_initialized_ = false;
}
// Releases the owned buffer and dims, but only if the variable was ever
// assigned (an unassigned variable owns nothing).
ResourceVariable::~ResourceVariable() {
  if (!is_initialized_) return;
  free(tensor_.data.raw);
  if (tensor_.dims != nullptr) TfLiteIntArrayFree(tensor_.dims);
}
// Deep-copies `tensor` into the variable's owned tensor, reusing the
// previously allocated dims/buffer when the shape/size already matches.
TfLiteStatus ResourceVariable::AssignFrom(const TfLiteTensor* tensor) {
  // Save the old allocated resources and attributes that we might use.
  char* old_raw = tensor_.data.raw;
  size_t old_bytes = tensor_.bytes;
  TfLiteIntArray* old_dims = tensor_.dims;
  // Copy primitive parameters. The variable's tensor is always dynamic,
  // regardless of the source tensor's allocation type.
  memset(&tensor_, 0, sizeof(tensor_));
  tensor_.allocation_type = kTfLiteDynamic;
  tensor_.type = tensor->type;
  tensor_.params = tensor->params;
  tensor_.quantization = tensor->quantization;
  // Copy old shape if possible otherwise create a new one.
  if (TfLiteIntArrayEqual(old_dims, tensor->dims)) {
    tensor_.dims = old_dims;
  } else {
    TfLiteIntArrayFree(old_dims);
    tensor_.dims = TfLiteIntArrayCopy(tensor->dims);
  }
  // Reuse the same buffer if possible otherwise allocate a new one.
  tensor_.data.raw = old_raw;
  if (old_bytes != tensor->bytes) {
    // NOTE(review): presumably TfLiteTensorRealloc updates both
    // tensor_.data.raw and tensor_.bytes; its status is not checked here --
    // confirm it cannot fail before the memcpy below.
    TfLiteTensorRealloc(tensor->bytes, &tensor_);
  } else {
    tensor_.bytes = old_bytes;
  }
  // Copy the payload. Assumes `tensor` carries a valid buffer of
  // `tensor->bytes` bytes.
  memcpy(tensor_.data.raw, tensor->data.raw, tensor_.bytes);
  is_initialized_ = true;
  return kTfLiteOk;
}
// Registers an empty ResourceVariable under `resource_id` unless the map
// already holds a resource with that id.
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
                                          int resource_id) {
  if (resources->count(resource_id) == 0) {
    resources->emplace(resource_id, std::make_unique<ResourceVariable>());
  }
}
// Returns the variable registered under `resource_id`, or nullptr if absent.
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id) {
  const auto it = resources->find(resource_id);
  if (it == resources->end()) return nullptr;
  return static_cast<ResourceVariable*>(it->second.get());
}
} // namespace resource
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/resource_variable.cc | C++ | apache-2.0 | 3,003 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
namespace tflite {
namespace resource {
/// WARNING: Experimental interface, subject to change.
// A resource variable class. It's similar to TensorFlow Resource
// Variable, but it's identified with int32 ID in TFLite (instead of
// using Resource handle like TensorFlow).
class ResourceVariable : public ResourceBase {
 public:
  ResourceVariable();
  ResourceVariable(ResourceVariable&& other);
  // Not copyable: the variable uniquely owns its tensor buffer.
  ResourceVariable(const ResourceVariable&) = delete;
  ResourceVariable& operator=(const ResourceVariable&) = delete;
  ~ResourceVariable() override;
  // Assigns data from a tensor. Copies its type, shape and data over.
  TfLiteStatus AssignFrom(const TfLiteTensor* tensor);
  // Gets the data tensor stored in the resource variable.
  // Returns `nullptr` if the variable was never initialized by calling
  // `AssignFrom`.
  TfLiteTensor* GetTensor() { return is_initialized_ ? &tensor_ : nullptr; }
  // Returns true if this resource variable is initialized.
  bool IsInitialized() override { return is_initialized_; }
 private:
  // The tensor (and its buffer stored in `tensor_.data`) is fully owned by
  // the `ResourceVariable` object.
  TfLiteTensor tensor_;
  // True once `AssignFrom` has been called at least once; until then
  // `tensor_` is zero-filled and must not be read.
  bool is_initialized_ = false;
};
// Creates a resource variable, shared among all the subgraphs with the given
// resource id if there is an existing one.
// WARNING: Experimental interface, subject to change.
void CreateResourceVariableIfNotAvailable(ResourceMap* resources,
int resource_id);
// Returns the corresponding resource variable, or nullptr if none.
// WARNING: Experimental interface, subject to change.
ResourceVariable* GetResourceVariable(ResourceMap* resources, int resource_id);
} // namespace resource
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_RESOURCE_VARIABLE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/resource_variable.h | C++ | apache-2.0 | 2,853 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/resource/resource_variable.h"
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/util.h"
namespace tflite {
namespace resource {
// Helper util that initializes 'tensor' as a float32 tensor of the given
// shape with every element set to `default_value`. The element buffer is
// malloc'd here; per the tests below, TfLiteTensorFree reclaims it for
// kTfLiteDynamic tensors, while for other allocation types the caller must
// free `tensor->data.raw` manually.
void InitTensor(const std::vector<int>& shape, TfLiteAllocationType alloc_type,
                float default_value, TfLiteTensor* tensor) {
  memset(tensor, 0, sizeof(TfLiteTensor));
  int num_elements = 1;
  for (auto dim : shape) num_elements *= dim;
  // An empty shape denotes "no elements" for these tests (not a scalar).
  if (shape.empty()) num_elements = 0;
  float* buf = static_cast<float*>(malloc(sizeof(float) * num_elements));
  for (int i = 0; i < num_elements; ++i) buf[i] = default_value;
  const int bytes = num_elements * sizeof(buf[0]);
  auto* dims = ConvertArrayToTfLiteIntArray(shape.size(), shape.data());
  TfLiteTensorReset(TfLiteType::kTfLiteFloat32, nullptr, dims, {},
                    reinterpret_cast<char*>(buf), bytes, alloc_type, nullptr,
                    false, tensor);
}
// Assigning from an arena-allocated tensor must deep-copy into the variable's
// own dynamically allocated storage.
TEST(ResourceTest, NonDynamicTensorAssign) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor;
  std::vector<int> shape = {1};
  InitTensor(shape, kTfLiteArenaRw, 1.0f, &tensor);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(1.0f, value->data.f[0]);
  // Cleanup
  // For non dynamic tensors we need to delete the buffers manually.
  free(tensor.data.raw);
  TfLiteTensorFree(&tensor);
}
// Assigning from a kTfLiteDynamic tensor; here TfLiteTensorFree alone
// reclaims the source buffer.
TEST(ResourceTest, DynamicTensorAssign) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  TfLiteTensor tensor;
  std::vector<int> shape = {1};
  InitTensor(shape, kTfLiteDynamic, 1.0f, &tensor);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(1.0f, value->data.f[0]);
  // Cleanup
  TfLiteTensorFree(&tensor);
}
// Re-assigning with an equal-sized tensor should reuse the existing buffer
// and simply overwrite the payload.
TEST(ResourceTest, AssignSameSizeTensor) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  // We create 2 tensors and make 2 calls for Assign.
  // The second Assign call should trigger the case of assign with same size.
  TfLiteTensor tensor_a, tensor_b;
  std::vector<int> shape_a = {1};
  std::vector<int> shape_b = {1};
  InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
  InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(1.0f, value->data.f[0]);
  // Second AssignFrom but now tensor_b has same size as the variable.
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
  EXPECT_TRUE(var.IsInitialized());
  value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(4.0f, value->data.f[0]);
  // Cleanup
  TfLiteTensorFree(&tensor_a);
  TfLiteTensorFree(&tensor_b);
}
// Re-assigning with a different-sized tensor forces a reallocation and a new
// shape on the variable's tensor.
TEST(ResourceTest, AssignDifferentSizeTensor) {
  ResourceVariable var;
  EXPECT_FALSE(var.IsInitialized());
  // We create 2 tensors and make 2 calls for Assign.
  // The second Assign call should trigger the case of assign with different
  // size.
  TfLiteTensor tensor_a, tensor_b;
  std::vector<int> shape_a = {1};
  std::vector<int> shape_b = {2};
  InitTensor(shape_a, kTfLiteDynamic, 1.0, &tensor_a);
  InitTensor(shape_b, kTfLiteDynamic, 4.0, &tensor_b);
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_a));
  EXPECT_TRUE(var.IsInitialized());
  auto* value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float), value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(1, value->dims->data[0]);
  EXPECT_EQ(1.0f, value->data.f[0]);
  // Second AssignFrom but now tensor_b has different size from the variable.
  EXPECT_EQ(kTfLiteOk, var.AssignFrom(&tensor_b));
  EXPECT_TRUE(var.IsInitialized());
  value = var.GetTensor();
  // Variables are always dynamic type.
  EXPECT_EQ(kTfLiteDynamic, value->allocation_type);
  EXPECT_EQ(kTfLiteFloat32, value->type);
  EXPECT_EQ(sizeof(float) * 2, value->bytes);
  EXPECT_EQ(1, value->dims->size);
  EXPECT_EQ(2, value->dims->data[0]);
  EXPECT_EQ(4.0f, value->data.f[0]);
  // Cleanup
  TfLiteTensorFree(&tensor_a);
  TfLiteTensorFree(&tensor_b);
}
} // namespace resource
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/resource_variable_test.cc | C++ | apache-2.0 | 5,996 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/experimental/resource/static_hashtable.h"
#include <memory>
#include "tensorflow/lite/experimental/resource/lookup_interfaces.h"
namespace tflite {
namespace resource {
namespace internal {
// Looks up every element of `keys` in the table and writes the matching
// value into `values`; keys that are absent get the default value instead.
// Fails unless Import has run first.
template <typename KeyType, typename ValueType>
TfLiteStatus StaticHashtable<KeyType, ValueType>::Lookup(
    TfLiteContext* context, const TfLiteTensor* keys, TfLiteTensor* values,
    const TfLiteTensor* default_value) {
  if (!is_initialized_) {
    context->ReportError(context,
                         "hashtable need to be initialized before using");
    return kTfLiteError;
  }
  const int size =
      MatchingFlatSize(GetTensorShape(keys), GetTensorShape(values));
  auto key_tensor_reader = TensorReader<KeyType>(keys);
  auto value_tensor_writer = TensorWriter<ValueType>(values);
  auto default_value_tensor_reader = TensorReader<ValueType>(default_value);
  // Only the first element of `default_value` is used, even if the tensor
  // holds more than one element.
  ValueType first_default_value = default_value_tensor_reader.GetData(0);
  for (int i = 0; i < size; ++i) {
    auto result = map_.find(key_tensor_reader.GetData(i));
    if (result != map_.end()) {
      value_tensor_writer.SetData(i, result->second);
    } else {
      value_tensor_writer.SetData(i, first_default_value);
    }
  }
  // This is for a string tensor case in order to write buffer back to the
  // actual tensor destination. Otherwise, it does nothing since the scalar data
  // will be written into the tensor storage directly.
  value_tensor_writer.Commit();
  return kTfLiteOk;
}
// Bulk-loads the table from the parallel `keys`/`values` tensors.
// Import nodes can be invoked twice because the converter will not extract
// the initializer graph separately from the original graph. The invocations
// after the first call will be ignored.
template <typename KeyType, typename ValueType>
TfLiteStatus StaticHashtable<KeyType, ValueType>::Import(
    TfLiteContext* context, const TfLiteTensor* keys,
    const TfLiteTensor* values) {
  if (is_initialized_) {
    return kTfLiteOk;
  }
  const int size =
      MatchingFlatSize(GetTensorShape(keys), GetTensorShape(values));
  auto key_tensor_reader = TensorReader<KeyType>(keys);
  // Fixed a misleading name: this object reads from `values`, it is a
  // TensorReader, not a writer.
  auto value_tensor_reader = TensorReader<ValueType>(values);
  for (int i = 0; i < size; ++i) {
    // std::unordered_map::insert keeps the first occurrence of a duplicate
    // key; later duplicates in the input are ignored.
    map_.insert({key_tensor_reader.GetData(i), value_tensor_reader.GetData(i)});
  }
  is_initialized_ = true;
  return kTfLiteOk;
}
// Instantiates a StaticHashtable for the supported (key, value) dtype pairs:
// (int64 -> string) and (string -> int64). Any other combination yields
// nullptr.
LookupInterface* CreateStaticHashtable(TfLiteType key_type,
                                       TfLiteType value_type) {
  const bool int64_to_string =
      key_type == kTfLiteInt64 && value_type == kTfLiteString;
  if (int64_to_string) {
    return new StaticHashtable<std::int64_t, std::string>(key_type, value_type);
  }
  const bool string_to_int64 =
      key_type == kTfLiteString && value_type == kTfLiteInt64;
  if (string_to_int64) {
    return new StaticHashtable<std::string, std::int64_t>(key_type, value_type);
  }
  return nullptr;
}
} // namespace internal
// Registers a static hashtable resource under `resource_id` unless the map
// already holds a resource with that id.
void CreateHashtableResourceIfNotAvailable(ResourceMap* resources,
                                           int resource_id,
                                           TfLiteType key_dtype,
                                           TfLiteType value_dtype) {
  if (resources->count(resource_id) == 0) {
    resources->emplace(resource_id,
                       std::unique_ptr<LookupInterface>(
                           internal::CreateStaticHashtable(key_dtype,
                                                           value_dtype)));
  }
}
// Returns the hashtable registered under `resource_id`, or nullptr if absent.
LookupInterface* GetHashtableResource(ResourceMap* resources, int resource_id) {
  const auto it = resources->find(resource_id);
  if (it == resources->end()) return nullptr;
  return static_cast<LookupInterface*>(it->second.get());
}
} // namespace resource
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/static_hashtable.cc | C++ | apache-2.0 | 4,233 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_STATIC_HASHTABLE_H_
#define TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_STATIC_HASHTABLE_H_
#include <unordered_map>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/experimental/resource/lookup_interfaces.h"
#include "tensorflow/lite/experimental/resource/lookup_util.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
namespace resource {
namespace internal {
// A static hash table class. This hash table allows initialization one time in
// its life cycle. This hash table implements Tensorflow core's HashTableV2 op.
// Keys and values are stored in an in-memory std::unordered_map; Lookup and
// Import (defined in the corresponding .cc file) bridge between TfLiteTensor
// data and that map.
template <typename KeyType, typename ValueType>
class StaticHashtable : public tflite::resource::LookupInterface {
 public:
  explicit StaticHashtable(TfLiteType key_type, TfLiteType value_type)
      : key_type_(key_type), value_type_(value_type) {}
  ~StaticHashtable() override {}
  // Finds the corresponding value of the given keys tensor in the map and
  // copies the result data to the given values tensor. If there is no matching
  // value, it will write the default value into the matched position instead.
  TfLiteStatus Lookup(TfLiteContext* context, const TfLiteTensor* keys,
                      TfLiteTensor* values,
                      const TfLiteTensor* default_value) override;
  // Inserts the given key and value tensor data into the hash table.
  TfLiteStatus Import(TfLiteContext* context, const TfLiteTensor* keys,
                      const TfLiteTensor* values) override;
  // Returns the item size of the hash table.
  size_t Size() override { return map_.size(); }
  // TfLiteType tags mirroring the KeyType/ValueType template parameters.
  TfLiteType GetKeyType() const override { return key_type_; }
  TfLiteType GetValueType() const override { return value_type_; }
  // Verifies the given tensors match the table's declared key/value types;
  // returns kTfLiteOk on success, otherwise reports via TF_LITE_ENSURE_EQ.
  TfLiteStatus CheckKeyAndValueTypes(TfLiteContext* context,
                                     const TfLiteTensor* keys,
                                     const TfLiteTensor* values) override {
    TF_LITE_ENSURE_EQ(context, keys->type, key_type_);
    TF_LITE_ENSURE_EQ(context, values->type, value_type_);
    return kTfLiteOk;
  }
  // Returns true if the hash table is initialized.
  bool IsInitialized() override { return is_initialized_; }
 private:
  TfLiteType key_type_;
  TfLiteType value_type_;
  // Backing store for the key/value pairs inserted via Import.
  std::unordered_map<KeyType, ValueType> map_;
  // Set once the table has been populated; see IsInitialized().
  bool is_initialized_ = false;
};
::tflite::resource::LookupInterface* CreateStaticHashtable(
TfLiteType key_type, TfLiteType value_type);
} // namespace internal
} // namespace resource
} // namespace tflite
#endif // TENSORFLOW_LITE_EXPERIMENTAL_RESOURCE_STATIC_HASHTABLE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/resource/static_hashtable.h | C++ | apache-2.0 | 3,372 |
# TFLite modules to support TensorBoard plugin.
package(
    default_visibility = ["//tensorflow:internal"],
    licenses = ["notice"],
)
# Helper library exposing the list of ops potentially supported by the TFLite
# converter; publicly visible so external plugins can depend on it.
py_library(
    name = "ops_util",
    srcs = ["ops_util.py"],
    srcs_version = "PY3",
    visibility = ["//visibility:public"],
    deps = [
        "//tensorflow/lite/python:wrap_toco",
        "//tensorflow/python:util",
        "//tensorflow/python/util:tf_export",
    ],
)
# Unit test for :ops_util.
py_test(
    name = "ops_util_test",
    srcs = ["ops_util_test.py"],
    python_version = "PY3",
    srcs_version = "PY3",
    deps = [
        ":ops_util",
        "//tensorflow/python:client_testlib",
    ],
)
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/tensorboard/BUILD | Starlark | apache-2.0 | 635 |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops util to handle ops for Lite."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from tensorflow.lite.python import wrap_toco
from tensorflow.python.util import deprecation
from tensorflow.python.util.tf_export import tf_export
class SupportedOp(collections.namedtuple("SupportedOp", ["op"])):
  """Immutable record for a single op potentially supported by TFLite.

  Attributes:
    op: string of op name (e.g. "Add").
  """
@tf_export(v1=["lite.experimental.get_potentially_supported_ops"])
@deprecation.deprecated(
    None, "Deprecated in TF 2.4 and targeted to remove after TF 2.5. This "
    "experimental function in TF v1 is to get a list of op names without real "
    "conversion. To check whether a model can be convertable or not indeed, "
    "please run `tf.lite.TFLiteConverter`.")
def get_potentially_supported_ops():
  """Returns operations potentially supported by TensorFlow Lite.

  The potentially support list contains a list of ops that are partially or
  fully supported, which is derived by simply scanning op names to check
  whether they can be handled without real conversion and specific parameters.

  Given that some ops may be partially supported, the optimal way to determine
  if a model's operations are supported is by converting using the TensorFlow
  Lite converter.

  Returns:
    A list of SupportedOp.
  """
  # Bug fix: the deprecation message above previously read "TF 2.5. This" with
  # no trailing space, producing "Thisexperimental" in the rendered warning.
  # Each entry from the wrapper is a dict carrying the op name under "op".
  ops = wrap_toco.wrapped_get_potentially_supported_ops()
  return [SupportedOp(o["op"]) for o in ops]
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/tensorboard/ops_util.py | Python | apache-2.0 | 2,171 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for backend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.lite.experimental.tensorboard import ops_util
from tensorflow.python.platform import test
class OpsUtilTest(test.TestCase):

  def testGetPotentiallySupportedOps(self):
    """Checks shape and a few known members of the supported-op list."""
    supported = ops_util.get_potentially_supported_ops()
    # See GetTensorFlowNodeConverterMap() in
    # tensorflow/lite/toco/import_tensorflow.cc
    self.assertIsInstance(supported, list)
    # Spot-check ops that are certainly present in the converter map.
    for op_name in ("Add", "Log", "Sigmoid", "Softmax"):
      self.assertIn(ops_util.SupportedOp(op_name), supported)


if __name__ == "__main__":
  test.main()
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/experimental/tensorboard/ops_util_test.py | Python | apache-2.0 | 1,505 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_EXTERNAL_CPU_BACKEND_CONTEXT_H_
#define TENSORFLOW_LITE_EXTERNAL_CPU_BACKEND_CONTEXT_H_
#include <memory>
#include <utility>
#include "tensorflow/lite/c/common.h"
namespace tflite {
// This is the base class for TF Lite internal backend contexts (like a
// RUY-based cpu backend context class). A derived internal backend context is
// generally a collection of utilities (i.e. a thread pool etc.) for TF Lite to
// use certain kernel libraries, such as Gemmlowp, RUY, etc., to implement TF
// Lite operators.
// Pure-virtual interface: concrete contexts must implement both methods below.
class TfLiteInternalBackendContext {
 public:
  virtual ~TfLiteInternalBackendContext() {}
  // Set the maximum number of threads that could be used for parallelizing
  // TfLite computation.
  virtual void SetMaxNumThreads(int max_num_threads) = 0;
  // A context may internally cache prepacked versions of constant tensors for
  // faster computation. This function will clear any caches on the context.
  virtual void ClearCaches() = 0;
};
// This TfLiteExternalContext-derived class is the default
// 'kTfLiteCpuBackendContext'-typed context that's used internally in TF Lite
// framework. The primary purpose of having this class is to allow the same cpu
// backend context to be sharable among a set of TF Lite interpreters so that
// certain system costs are saved, like saving the cost of having multiple
// thread pools in each separate cpu backend context etc..
//
// Note: as of 2019/07/19, such context sharing among a set of interpreters will
// break the execution if these interpreters are invoked simultaneously. It
// works only when these context-sharing interpreters are invoked in a
// serialized way. Here's an example to illustrate the context sharing among 2
// TF Lite interpreters:
//
// TfLiteExternalContext* global_ctxt = new ExternalCpuBackendContext();
// interpreter1 = /*...*/;
// interpreter1->SetExternalContext(kTfLiteCpuBackendContext, global_ctxt);
// interpreter2 = /*...*/;
// interpreter2->SetExternalContext(kTfLiteCpuBackendContext, global_ctxt);
//
// interpreter1->SetNumThreads(2);
// interpreter1->Invoke();
//
// interpreter2->SetNumThreads(4);
// interpreter2->Invoke();
//
// After sharing the context, calling 'SetNumThreads' on any of the
// context-sharing interpreters will have the global impact as it also refreshes
// the #thread info in the global cpu backend context (i.e. 'global_ctxt' above)
// that affects how much parallelism an interpreter invocation will use.
// Therefore, if different number of threads are used among different
// interpreters, don't call 'SetNumThreads' consecutively but call it
// separately between each interpreter's invocation as illustrated above.
//
// Note: it is the responsibility of the user of this context (i.e. a
// TFLiteInterpreter) to clear any state from the internal backend
// context if/when the interpreter no longer needs the shared context.
// See, e.g., TFLiteInterpreter destructor clears caches in the case of a
// shared ExternalCpuBackendContext.
class ExternalCpuBackendContext : public TfLiteExternalContext {
 public:
  ExternalCpuBackendContext();
  ~ExternalCpuBackendContext() {}
  // Takes ownership of `internal_backend_context`, replacing any previously
  // held context (the old one, if any, is destroyed).
  void set_internal_backend_context(
      std::unique_ptr<TfLiteInternalBackendContext> internal_backend_context) {
    internal_backend_context_ = std::move(internal_backend_context);
  }
  // Returns the held internal context, or nullptr if none has been set yet.
  // Ownership is retained by this object.
  TfLiteInternalBackendContext* internal_backend_context() const {
    return internal_backend_context_.get();
  }
 private:
  // Note the actual internal backend context object is lazily initialized.
  std::unique_ptr<TfLiteInternalBackendContext> internal_backend_context_;
  // Non-copyable: the context owns unique resources (see unique_ptr above).
  ExternalCpuBackendContext(const ExternalCpuBackendContext&) = delete;
  ExternalCpuBackendContext& operator=(const ExternalCpuBackendContext&) =
      delete;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_EXTERNAL_CPU_BACKEND_CONTEXT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/external_cpu_backend_context.h | C++ | apache-2.0 | 4,537 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_GRAPH_INFO_H_
#define TENSORFLOW_LITE_GRAPH_INFO_H_
#include <stddef.h>
#include <vector>
#include "tensorflow/lite/c/common.h"
namespace tflite {
// Basic information about an inference graph, where execution nodes
// are connected via tensors.
// Pure-virtual accessor interface; implementations adapt a concrete graph
// representation (e.g. a Subgraph) to the partitioning utilities below.
class GraphInfo {
 public:
  virtual ~GraphInfo() {}
  // Total number of tensors in the graph.
  virtual size_t num_tensors() const = 0;
  // Returns a tensor given its index which is expected to be between 0 and
  // num_tensors().
  virtual TfLiteTensor* tensor(size_t index) = 0;
  // Number of nodes in the current execution plan.
  virtual size_t num_execution_nodes() const = 0;
  // Total number of known nodes, which may include nodes that are no longer in
  // the execution plan. This happens in case of applying multiple delegates.
  // Should be >= num_execution_nodes()
  virtual size_t num_total_nodes() const = 0;
  // Returns a node given its index in the execution plan, which is expected to
  // be between 0 and num_execution_nodes().
  virtual const TfLiteNode& node(size_t index) const = 0;
  // Returns an implementation-specific node index which may be different from
  // execution-plan index.
  // Expected to be between 0 and num_total_nodes().
  virtual size_t node_index(size_t index) const = 0;
  // Returns the indices of the input tensors.
  virtual const std::vector<int>& inputs() const = 0;
  // Returns the indices of the output tensors.
  virtual const std::vector<int>& outputs() const = 0;
  // Returns the indices of the variable tensors.
  virtual const std::vector<int>& variables() const = 0;
};
// Represents a subset of nodes in a TensorFlow Lite graph.
struct NodeSubset {
  enum Type {
    kTfUnexplored = 0,  // temporarily used during creation
    kTfPartition,
    kTfNonPartition
  };
  Type type = kTfUnexplored;
  // Nodes within the node sub set
  std::vector<int> nodes;
  // Tensors that stride output from another node sub set that this depends on,
  // or global inputs to the TensorFlow Lite full graph.
  std::vector<int> input_tensors;
  // Outputs that are consumed by other node sub sets or are global output
  // tensors. All output tensors of the nodes in the node sub set that do not
  // appear in this list are intermediate results that can be potentially
  // elided.
  std::vector<int> output_tensors;
};
// Partitions a list of node indices `nodes_to_partition` into node subsets.
// Each node subset is in dependency order (i.e. a node subset appears only
// after the node subsets producing its inputs). `node_subsets` is assumed to
// be empty.
TfLiteStatus PartitionGraphIntoIndependentNodeSubsets(
const GraphInfo* info, const TfLiteIntArray* nodes_to_partition,
std::vector<NodeSubset>* node_subsets);
} // namespace tflite
#endif // TENSORFLOW_LITE_GRAPH_INFO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/graph_info.h | C++ | apache-2.0 | 3,469 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
/// Main abstraction controlling the tflite interpreter.
/// See context.h for the API for defining operations (TfLiteRegistration).
#ifndef TENSORFLOW_LITE_INTERPRETER_H_
#define TENSORFLOW_LITE_INTERPRETER_H_
#include <stddef.h>
#include <stdint.h>
#include <complex>
#include <cstdio>
#include <cstdlib>
#include <functional>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/c/common.h" // IWYU pragma: export
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/profiler.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/experimental/resource/resource_base.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
#include "tensorflow/lite/memory_planner.h"
#include "tensorflow/lite/portable_type_to_tflitetype.h"
#include "tensorflow/lite/stderr_reporter.h"
#include "tensorflow/lite/string_type.h"
#include "tensorflow/lite/type_to_tflitetype.h"
namespace tflite {
class InterpreterTest; // Class for friend declarations.
namespace delegates {
class InterpreterUtils; // Class for friend declarations.
namespace test_utils {
class TestDelegation; // Class for friend declarations.
} // namespace test_utils
} // namespace delegates
/// An interpreter for a graph of nodes that input and output from tensors.
/// Each node of the graph processes a set of input tensors and produces a
/// set of output Tensors. All inputs/output tensors are referenced by index.
///
/// Usage:
///
/// <pre><code>
/// // Create model from file. Note that the model instance must outlive the
/// // interpreter instance.
/// auto model = tflite::FlatBufferModel::BuildFromFile(...);
/// if (model == nullptr) {
/// // Return error.
/// }
/// // Create an Interpreter with an InterpreterBuilder.
/// std::unique_ptr<tflite::Interpreter> interpreter;
/// tflite::ops::builtin::BuiltinOpResolver resolver;
/// if (InterpreterBuilder(*model, resolver)(&interpreter) != kTfLiteOk) {
/// // Return failure.
/// }
/// if (interpreter->AllocateTensors() != kTfLiteOk) {
/// // Return failure.
/// }
///
/// auto input = interpreter->typed_tensor<float>(0);
/// for (int i = 0; i < input_size; i++) {
/// input[i] = ...;
/// }
/// interpreter->Invoke();
/// </code></pre>
///
/// Note: For nearly all practical use cases, one should not directly construct
/// an Interpreter object, but rather use the InterpreterBuilder.
///
/// WARNING: This class is *not* thread-safe. The client is responsible for
/// ensuring serialized interaction to avoid data races and undefined behavior.
class Interpreter {
public:
// Instantiate an interpreter. All errors associated with reading and
// processing this model will be forwarded to the error_reporter object.
//
// Note, if error_reporter is nullptr, then a default StderrReporter is
// used. Ownership of 'error_reporter' remains with the caller.
// WARNING: Use of this constructor outside of an InterpreterBuilder is not
// recommended.
explicit Interpreter(ErrorReporter* error_reporter = DefaultErrorReporter());
~Interpreter();
// Interpreters are not copyable as they have non-trivial memory semantics.
Interpreter(const Interpreter&) = delete;
Interpreter& operator=(const Interpreter&) = delete;
// Functions to build interpreter
#ifndef DOXYGEN_SKIP
/// Provide a list of tensor indexes that are inputs to the model.
/// Each index is bound check and this modifies the consistent_ flag of the
/// interpreter.
TfLiteStatus SetInputs(std::vector<int> inputs);
/// Provide a list of tensor indexes that are outputs to the model
/// Each index is bound check and this modifies the consistent_ flag of the
/// interpreter.
TfLiteStatus SetOutputs(std::vector<int> outputs);
/// Provide a list of tensor indexes that are variable tensors.
/// Each index is bound check and this modifies the consistent_ flag of the
/// interpreter.
TfLiteStatus SetVariables(std::vector<int> variables);
/// Ensure the internal node storage memory allocates at least `count`
/// spots for node. NOTE, this doesn't actually add operators. This is an
/// efficiency optimization that is subject to change.
void ReserveNodes(int count);
/// Adds a node with the given parameters and returns the index of the new
/// node in `node_index` (optionally). Interpreter will take ownership of
/// `builtin_data` and destroy it with `free`. Ownership of 'init_data'
/// remains with the caller.
TfLiteStatus AddNodeWithParameters(const std::vector<int>& inputs,
const std::vector<int>& outputs,
const char* init_data,
size_t init_data_size, void* builtin_data,
const TfLiteRegistration* registration,
int* node_index = nullptr);
/// Adds `tensors_to_add` tensors, preserving pre-existing Tensor entries.
/// The value pointed to by `first_new_tensor_index` will be set to the
/// index of the first new tensor if `first_new_tensor_index` is non-null.
TfLiteStatus AddTensors(int tensors_to_add,
int* first_new_tensor_index = nullptr);
/// Set description of inputs/outputs/data/fptrs for node `node_index`.
/// This variant assumes an external buffer has been allocated of size
/// bytes. The lifetime of buffer must be ensured to be greater or equal
/// to Interpreter.
TfLiteStatus SetTensorParametersReadOnly(
int tensor_index, TfLiteType type, const char* name,
const std::vector<int>& dims, TfLiteQuantization quantization,
const char* buffer, size_t bytes, const Allocation* allocation = nullptr);
/// Legacy. Deprecated in favor of above.
inline TfLiteStatus SetTensorParametersReadOnly(
int tensor_index, TfLiteType type, const char* name,
const std::vector<int>& dims, TfLiteQuantizationParams quantization,
const char* buffer, size_t bytes,
const Allocation* allocation = nullptr) {
return SetTensorParametersReadOnly(tensor_index, type, name, dims.size(),
dims.data(), quantization, buffer, bytes,
allocation);
}
TfLiteStatus SetTensorParametersReadOnly(
int tensor_index, TfLiteType type, const char* name, const size_t rank,
const int* dims, TfLiteQuantizationParams quantization,
const char* buffer, size_t bytes, const Allocation* allocation = nullptr);
/// Set description of inputs/outputs/data/fptrs for node `node_index`.
/// This variant assumes an external buffer has been allocated of size
/// bytes. The lifetime of buffer must be ensured to be greater or equal
/// to Interpreter.
TfLiteStatus SetTensorParametersReadWrite(int tensor_index, TfLiteType type,
const char* name,
const std::vector<int>& dims,
TfLiteQuantization quantization,
bool is_variable = false);
/// Legacy. Deprecated in favor of above.
inline TfLiteStatus SetTensorParametersReadWrite(
int tensor_index, TfLiteType type, const char* name,
const std::vector<int>& dims, TfLiteQuantizationParams quantization,
bool is_variable = false,
const std::vector<int>* dims_signature = nullptr) {
size_t rank_dims_signature = 0;
const int* dims_signature_pointer = nullptr;
if (dims_signature) {
rank_dims_signature = dims_signature->size();
dims_signature_pointer = dims_signature->data();
}
return SetTensorParametersReadWrite(
tensor_index, type, name, dims.size(), dims.data(), quantization,
is_variable, rank_dims_signature, dims_signature_pointer);
}
TfLiteStatus SetTensorParametersReadWrite(
int tensor_index, TfLiteType type, const char* name, const size_t rank,
const int* dims, TfLiteQuantizationParams quantization,
bool is_variable = false, const size_t rank_dims_signature = 0,
const int* dims_signature = nullptr);
#endif // DOXYGEN_SKIP
// Functions to access tensor data
/// Read only access to list of inputs.
const std::vector<int>& inputs() const { return primary_subgraph().inputs(); }
/// Return the name of a given input. The given index must be between 0 and
/// inputs().size().
const char* GetInputName(int index) const {
return context_->tensors[inputs()[index]].name;
}
/// Read only access to list of outputs.
const std::vector<int>& outputs() const {
return primary_subgraph().outputs();
}
/// Read only access to list of variable tensors.
const std::vector<int>& variables() const {
return primary_subgraph().variables();
}
/// Return the name of a given output. The given index must be between 0 and
/// outputs().size().
const char* GetOutputName(int index) const {
return context_->tensors[outputs()[index]].name;
}
/// Return the number of tensors in the model.
size_t tensors_size() const { return context_->tensors_size; }
/// Return the number of ops in the model.
size_t nodes_size() const { return primary_subgraph().nodes_size(); }
/// WARNING: Experimental interface, subject to change
const std::vector<int>& execution_plan() const {
return primary_subgraph().execution_plan();
}
#ifndef DOXYGEN_SKIP
  /// WARNING: Experimental interface, subject to change
  /// Overrides execution plan. This bounds checks indices sent in.
  // Bug fix: the guard was a truncated "DOXYGEN_", which never matched the
  // DOXYGEN_SKIP convention used elsewhere in this header (and this block's
  // own #endif comment), so doc generation would not have skipped it.
  TfLiteStatus SetExecutionPlan(const std::vector<int>& new_plan);
#endif  // DOXYGEN_SKIP
/// Get a mutable tensor data structure.
// TODO(aselle): Create a safe ArrayHandle interface to avoid exposing this
// read/write access to structure
TfLiteTensor* tensor(int tensor_index) {
return primary_subgraph().tensor(tensor_index);
}
/// Get an immutable tensor data structure.
const TfLiteTensor* tensor(int tensor_index) const {
return primary_subgraph().tensor(tensor_index);
}
/// Get a pointer to an operation and registration data structure if in
/// bounds.
const std::pair<TfLiteNode, TfLiteRegistration>* node_and_registration(
int node_index) const {
return primary_subgraph().node_and_registration(node_index);
}
  /// Perform a checked cast to the appropriate tensor type (mutable pointer
  /// version).
  /// Returns nullptr when the tensor lookup yields no tensor or when T's
  /// TfLiteType does not match the tensor's declared type.
  template <class T>
  T* typed_tensor(int tensor_index) {
    if (TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return reinterpret_cast<T*>(tensor_ptr->data.raw);
      }
    }
    return nullptr;
  }
  /// Perform a checked cast to the appropriate tensor type (immutable pointer
  /// version).
  /// Returns nullptr when the tensor lookup yields no tensor or when T's
  /// TfLiteType does not match the tensor's declared type.
  template <class T>
  const T* typed_tensor(int tensor_index) const {
    if (const TfLiteTensor* tensor_ptr = tensor(tensor_index)) {
      if (tensor_ptr->type == typeToTfLiteType<T>()) {
        return reinterpret_cast<const T*>(tensor_ptr->data.raw);
      }
    }
    return nullptr;
  }
/// WARNING: Experimental interface, subject to change
/// Returns list of all names of different method signatures defined
/// in the model.
/// Note, pointers returned have lifetime same as the Interpreter object.
std::vector<const std::string*> signature_def_names() const {
std::vector<const std::string*> method_names;
method_names.reserve(signature_defs_.size());
for (const auto& sig_def : signature_defs_) {
method_names.emplace_back(&sig_def.method_name);
}
return method_names;
}
/// WARNING: Experimental interface, subject to change
/// Returns the mapping of inputs to tensor index in the signature
/// specified through 'method_name'.
/// If invalid name passed, an empty list will be returned.
const std::map<std::string, uint32_t>& signature_inputs(
const char* method_name) const {
for (const auto& sig_def : signature_defs_) {
if (sig_def.method_name == method_name) return sig_def.inputs;
}
static const std::map<std::string, uint32_t>* default_empty_list =
new std::map<std::string, uint32_t>();
return *default_empty_list;
}
/// WARNING: Experimental interface, subject to change
/// Returns the mapping of outputs to tensor index in the signature
/// specified through 'method_name'.
/// If invalid name passed, an empty list will be returned.
const std::map<std::string, uint32_t>& signature_outputs(
const char* method_name) const {
for (const auto& sig_def : signature_defs_) {
if (sig_def.method_name == method_name) return sig_def.outputs;
}
static const std::map<std::string, uint32_t>* default_empty_list =
new std::map<std::string, uint32_t>();
return *default_empty_list;
}
/// WARNING: Experimental interface, subject to change
/// Returns the input tensor identified by 'signature_input_name' in the
/// signature identified by 'signature_method_name'.
/// Returns nullptr if not found.
TfLiteTensor* input_tensor_by_signature_name(
const char* signature_input_name, const char* signature_method_name) {
const int tensor_index = GetTensorIndexFromSignatureDefName(
signature_input_name, signature_method_name, /*is_input=*/true);
return tensor_index == -1 ? nullptr : tensor(tensor_index);
}
/// WARNING: Experimental interface, subject to change
/// Returns the output tensor identified by 'signature_output_name' in the
/// signature identified by 'signature_method_name'.
/// Returns nullptr if not found.
const TfLiteTensor* output_tensor_by_signature_name(
const char* signature_output_name,
const char* signature_method_name) const {
const int tensor_index = GetTensorIndexFromSignatureDefName(
signature_output_name, signature_method_name, /*is_input=*/false);
return tensor_index == -1 ? nullptr : tensor(tensor_index);
}
  /// Return a mutable pointer to the given input tensor. The given index must
  /// be between 0 and inputs().size().
  TfLiteTensor* input_tensor(size_t index) { return tensor(inputs()[index]); }
  /// Return an immutable pointer to the given input tensor. The given index
  /// must be between 0 and inputs().size().
  const TfLiteTensor* input_tensor(size_t index) const {
    return tensor(inputs()[index]);
  }
/// Return a mutable pointer into the data of a given input tensor. The given
/// index must be between 0 and inputs().size().
template <class T>
T* typed_input_tensor(int index) {
return typed_tensor<T>(inputs()[index]);
}
/// Return an immutable pointer into the data of a given input tensor. The
/// given index must be between 0 and inputs().size().
template <class T>
const T* typed_input_tensor(int index) const {
return typed_tensor<T>(inputs()[index]);
}
/// Return a mutable pointer to the given output tensor. The given index must
/// be between 0 and outputs().size().
TfLiteTensor* output_tensor(size_t index) { return tensor(outputs()[index]); }
/// Return an immutable pointer to the given output tensor. The given index
/// must be between 0 and outputs().size().
const TfLiteTensor* output_tensor(size_t index) const {
return tensor(outputs()[index]);
}
/// Return a mutable pointer into the data of a given output tensor. The given
/// index must be between 0 and outputs().size().
template <class T>
T* typed_output_tensor(int index) {
return typed_tensor<T>(outputs()[index]);
}
/// Return an immutable pointer into the data of a given output tensor. The
/// given index must be between 0 and outputs().size().
template <class T>
const T* typed_output_tensor(int index) const {
return typed_tensor<T>(outputs()[index]);
}
/// Change the dimensionality of a given tensor. Note, this is only acceptable
/// for tensor indices that are inputs or variables.
/// Returns status of failure or success. Note that this doesn't actually
/// resize any existing buffers. A call to AllocateTensors() is required to
/// change the tensor input buffer.
TfLiteStatus ResizeInputTensor(int tensor_index,
const std::vector<int>& dims);
// WARNING: Experimental interface, subject to change
// Change the dimensionality of a given tensor. This is only acceptable for
// tensor indices that are inputs or variables. Only unknown dimensions can be
// resized with this function. Unknown dimensions are indicated as `-1` in the
// `dims_signature` attribute of a `TfLiteTensor`. Returns status of failure
// or success. Note that this doesn't actually resize any existing buffers.
/// A call to AllocateTensors() is required to change the tensor input buffer.
TfLiteStatus ResizeInputTensorStrict(int tensor_index,
const std::vector<int>& dims);
// This releases memory held by non-persistent tensors. It does NOT re-perform
// memory planning.
// AllocateTensors needs to be called before next invocation.
/// WARNING: Experimental interface, subject to change
TfLiteStatus ReleaseNonPersistentMemory();
// Update allocations for all tensors. This will redim dependent tensors
// using the input tensor dimensionality as given. This is relatively
// expensive. This *must be* called after the interpreter has been created
// and before running inference (and accessing tensor buffers), and *must be*
// called again if (and only if) an input tensor is resized. Returns status of
// success or failure. Will fail if any of the ops in the model (other than
// those which were rewritten by delegates, if any) are not supported by the
// Interpreter's OpResolver.
TfLiteStatus AllocateTensors();
/// Invoke the interpreter (run the whole graph in dependency order).
///
  /// NOTE: It is possible that the interpreter is not in a ready state
  /// to evaluate (i.e. if a ResizeTensor() has been performed without an
  /// AllocateTensors()).
/// Returns status of success or failure.
TfLiteStatus Invoke();
/// Set the number of threads available to the interpreter.
///
/// NOTE: num_threads should be >= -1. Setting num_threads to 0 has the effect
/// to disable multithreading, which is equivalent to setting num_threads
/// to 1. If set to the value -1, the number of threads used will be
/// implementation-defined and platform-dependent.
TfLiteStatus SetNumThreads(int num_threads);
/// Allow float16 precision for FP32 calculation when possible.
/// Default: not allow.
///
/// WARNING: This API is deprecated: prefer controlling this via delegate
  /// options, e.g. `tflite::StatefulNnApiDelegate::Options::allow_fp16` or
/// `TfLiteGpuDelegateOptionsV2::is_precision_loss_allowed`.
/// This method will be removed in a future release.
void SetAllowFp16PrecisionForFp32(bool allow);
  /// Get the half precision flag.
  /// WARNING: This is an experimental API and subject to change.
  bool GetAllowFp16PrecisionForFp32() const {
    // Flag lives on the primary subgraph's TfLiteContext; presumably set by
    // SetAllowFp16PrecisionForFp32 above — confirm against the .cc.
    return context_->allow_fp32_relax_to_fp16;
  }
/// Sets the cancellation function pointer in order to cancel a request in the
/// middle of a call to Invoke(). The interpreter queries this function during
/// inference, between op invocations; when it returns true, the interpreter
/// will abort execution and return `kTfLiteError`. The `data` parameter
/// contains any data used by the cancellation function, and if non-null,
/// remains owned by the caller.
/// WARNING: This is an experimental API and subject to change.
void SetCancellationFunction(void* data, bool (*check_cancelled_func)(void*));
  /// Allow a delegate to look at the graph and modify the graph to handle
  /// parts of the graph themselves. After this is called, the graph may
  /// contain new nodes that replace one or more nodes.
/// 'delegate' must outlive the interpreter.
/// Returns one of the following four status codes:
/// 1. kTfLiteOk: Success.
/// 2. kTfLiteDelegateError: Delegation failed due to an error in the
/// delegate, or the delegate parameter was null. The Interpreter has been
/// restored to its pre-delegation state.
/// NOTE: This undoes all delegates previously applied to the Interpreter.
/// 3. kTfLiteApplicationError : Delegation failed to be applied due to the
/// incompatibility with the TfLite runtime, e.g., the model graph is already
/// immutable when applying the delegate. However, the interpreter could still
/// be invoked.
/// 4. kTfLiteError: Unexpected/runtime failure.
/// WARNING: This is an experimental API and subject to change.
TfLiteStatus ModifyGraphWithDelegate(TfLiteDelegate* delegate);
// Owning handle to a TfLiteDelegate instance.
using TfLiteDelegatePtr =
std::unique_ptr<TfLiteDelegate, void (*)(TfLiteDelegate*)>;
  /// Same as ModifyGraphWithDelegate except this interpreter takes
  /// ownership of the provided delegate.
  /// WARNING: This is an experimental API and subject to change.
  template <typename Delegate, typename Deleter>
  inline TfLiteStatus ModifyGraphWithDelegate(
      std::unique_ptr<Delegate, Deleter> delegate) {
    // Capture the caller-supplied deleter by value so the type-erased cleanup
    // lambda below can destroy the delegate through its concrete type.
    Deleter deleter = std::move(delegate.get_deleter());
    // Note that we retain ownership of the delegate even if graph modification
    // fails, as delegate use will be in an indeterminate state at that point.
    owned_delegates_.emplace_back(
        delegate.release(), [deleter](TfLiteDelegate* delegate_to_delete) {
          // Cast the erased TfLiteDelegate* back to the original pointer type
          // before invoking the deleter.
          deleter(
              static_cast<typename std::unique_ptr<Delegate, Deleter>::pointer>(
                  delegate_to_delete));
        });
    return ModifyGraphWithDelegate(owned_delegates_.back().get());
  }
/// This overload is *never* OK. TfLiteDelegate is a C structure, so it has no
/// virtual destructor. The default deleter of the unique_ptr does not know
/// how to delete C++ objects deriving from TfLiteDelegate.
TfLiteStatus ModifyGraphWithDelegate(
std::unique_ptr<TfLiteDelegate> delegate) = delete;
  /// Ensure the data in `tensor.data` is readable. In case delegate is used,
  /// it might require to copy the data from delegate buffer to raw memory.
  /// WARNING: This is an experimental API and subject to change.
  TfLiteStatus EnsureTensorDataIsReadable(int tensor_index) {
    // Forwards to the primary subgraph, which owns the tensor storage.
    return primary_subgraph().EnsureTensorDataIsReadable(tensor_index);
  }
/// Set the delegate buffer handle to a tensor. It can be called in the
/// following cases:
/// 1. Set the buffer handle to a tensor that's not being written by a
/// delegate. For example, feeding an OpenGL texture as the input of the
/// inference graph.
/// 2. Set the buffer handle to a tensor that uses the same delegate.
/// For example, set an OpenGL texture as the output of inference, while
/// the node which produces output is an OpenGL delegate node.
/// WARNING: This is an experimental API and subject to change.
TfLiteStatus SetBufferHandle(int tensor_index,
TfLiteBufferHandle buffer_handle,
TfLiteDelegate* delegate);
/// Get the delegate buffer handle, and the delegate which can process the
/// buffer handle.
/// WARNING: This is an experimental API and subject to change.
TfLiteStatus GetBufferHandle(int tensor_index,
TfLiteBufferHandle* buffer_handle,
TfLiteDelegate** delegate);
/// Sets the profiler to tracing execution. The caller retains ownership
/// of the profiler and must ensure its validity.
/// WARNING: This is an experimental API and subject to change.
void SetProfiler(Profiler* profiler);
/// Same as SetProfiler except this interpreter takes ownership
/// of the provided profiler.
/// WARNING: This is an experimental API and subject to change.
void SetProfiler(std::unique_ptr<Profiler> profiler);
/// Gets the profiler used for op tracing.
/// WARNING: This is an experimental API and subject to change.
Profiler* GetProfiler();
// The default capacity of `tensors_` vector.
static constexpr int kTensorsReservedCapacity = 128;
/// The capacity headroom of `tensors_` vector before calling ops'
/// `prepare` and `invoke` function. In these functions, it's guaranteed
/// allocating up to `kTensorsCapacityHeadroom` more tensors won't invalidate
/// pointers to existing tensors.
static constexpr int kTensorsCapacityHeadroom = 16;
  /// Set if buffer handle output is allowed.
  ///
  /// When using hardware delegation, Interpreter will make the data of output
  /// tensors available in `tensor->data` by default. If the application can
  /// consume the buffer handle directly (e.g. reading output from OpenGL
  /// texture), it can set this flag to false, so Interpreter won't copy the
  /// data from buffer handle to CPU memory. WARNING: This is an experimental
  /// API and subject to change.
  void SetAllowBufferHandleOutput(bool allow_buffer_handle_output) {
    allow_buffer_handle_output_ = allow_buffer_handle_output;
  }
/// Reset all variable tensors to the default value.
/// If a variable tensor doesn't have a buffer, reset it to zero.
/// TODO(b/115961645): Implement - If a variable tensor has a buffer, reset it
/// to the value of the buffer.
/// WARNING: This is an experimental API and subject to change.
TfLiteStatus ResetVariableTensors();
/// Retrieve an operator's description of its work, for profiling purposes.
const char* OpProfilingString(const TfLiteRegistration& op_reg,
const TfLiteNode* node) const {
if (op_reg.profiling_string == nullptr) return nullptr;
return op_reg.profiling_string(context_, node);
}
// Set the value of an external context. TFLite interpreter doesn't take the
// memory ownership of this external context 'ctx', and the context should
// outlive the TFLite interpreter.
void SetExternalContext(TfLiteExternalContextType type,
TfLiteExternalContext* ctx);
// Assigns (or reassigns) a custom memory allocation for the given tensor.
// `flags` is a bitmask, see TfLiteCustomAllocationFlags.
// The runtime does NOT take ownership of the underlying memory.
//
// NOTE: User needs to call AllocateTensors() after this. In case of input
// resizing, buffers will be checked for required data size during
// AllocateTensors().
//
// Parameters should satisfy the following conditions:
// 1. tensor->allocation_type == kTfLiteArenaRw or kTfLiteArenaRwPersistent
// In general, this is true for I/O tensors & variable tensors.
// 2. allocation->data has the appropriate permissions for runtime access
// (Read-only for inputs, Read-Write for others), and outlives Interpreter.
// 3. allocation->bytes >= tensor->bytes.
// This condition is checked again if any tensors are resized.
// 4. allocation->data should be aligned to kDefaultTensorAlignment
// defined in lite/util.h. (Currently 64 bytes)
// This check is skipped if kTfLiteCustomAllocationFlagsSkipAlignCheck is
// set through `flags`.
//
// WARNING: This is an experimental interface that is subject to change.
TfLiteStatus SetCustomAllocationForTensor(
int tensor_index, const TfLiteCustomAllocation& allocation,
int64_t flags = kTfLiteCustomAllocationFlagsNone);
#ifndef DOXYGEN_SKIP
/// Adds `subgraphs_to_add` subgraphs, preserving pre-existing Subgraph
/// entries. The value pointed to by `first_new_subgraph_index` will be set to
/// the index of the first new subgraph if `first_new_subgraph_index` is
/// non-null.
/// WARNING: This is an experimental API and subject to change.
void AddSubgraphs(int subgraphs_to_add,
int* first_new_subgraph_index = nullptr);
  /// Return the number of subgraphs in the model.
  /// Always >= 1: subgraphs_ always holds at least the primary subgraph.
  /// WARNING: This is an experimental API and subject to change.
  size_t subgraphs_size() const { return subgraphs_.size(); }
/// Get a pointer to a subgraph if in bounds.
/// WARNING: This is an experimental API and subject to change.
Subgraph* subgraph(int subgraph_index) {
if (subgraph_index < 0 ||
static_cast<size_t>(subgraph_index) >= subgraphs_size())
return nullptr;
return &*subgraphs_[subgraph_index];
}
  /// Returns the primary (first) subgraph.
  /// WARNING: Experimental interface, subject to change
  Subgraph& primary_subgraph() {
    return *subgraphs_.front(); // Safe as subgraphs_ always has 1 entry.
  }
  /// Const overload of the above.
  /// WARNING: Experimental interface, subject to change
  const Subgraph& primary_subgraph() const {
    return *subgraphs_.front(); // Safe as subgraphs_ always has 1 entry.
  }
  /// WARNING: Experimental interface, subject to change
  /// Get the error reporter associated with this interpreter.
  ErrorReporter* error_reporter() const { return error_reporter_; }
#endif // DOXYGEN_SKIP
private:
  // Structure representing SignatureDef inputs/outputs. Populated from the
  // model's SignatureDef map (see SetSignatureDef below).
  struct SignatureDef {
    // Maps name in signature def as key to index of the tensor in the model.
    std::map<std::string, uint32_t> inputs;
    // Maps name in signature def as key to index of the tensor in the model.
    std::map<std::string, uint32_t> outputs;
    // The method name for this signature.
    std::string method_name;
    // The key of this SignatureDef in the SavedModel signature def map.
    std::string signature_def_key;
  };
friend class InterpreterBuilder;
friend class tflite::InterpreterTest;
friend class tflite::delegates::InterpreterUtils;
friend class tflite::delegates::test_utils::TestDelegation;
/// Set the value of an external context.
static void SetExternalContext(struct TfLiteContext* context,
TfLiteExternalContextType type,
TfLiteExternalContext* ctx);
// Helper method that return the tensor index that corresponds to
// a name in a SignatureDef. Defined by 'signature_method_name', and
// 'signature_tensor_name'.
// If 'is_input' is true then the tensor is checked in input tensors,
// otherwise it will be checked in output tensors.
// Returns -1 if the tensor is not found.
int GetTensorIndexFromSignatureDefName(const char* signature_tensor_name,
const char* signature_method_name,
bool is_input) const {
// Iterate directly and don't use other methods to avoid extra allocation.
for (const auto& signature : signature_defs_) {
if (signature.method_name != signature_method_name) continue;
auto& signature_list = (is_input ? signature.inputs : signature.outputs);
auto tensor_iter = signature_list.find(signature_tensor_name);
if (tensor_iter == signature_list.end()) return -1;
return tensor_iter->second;
}
return -1;
}
// Sets the profiler to all subgraphs.
void SetSubgraphProfiler();
// Remove delegates (for fallback behaviour). The interpreter is invokable
// afterwards.
TfLiteStatus RemoveAllDelegates();
// Returns true if delegates have been applied.
bool HasDelegates();
// Returns true if cancellation function returns true.
bool IsCancelled();
  // Sets the list of signature defs in the model, replacing any previous
  // list. Takes ownership of the vector contents (moved into
  // signature_defs_).
  void SetSignatureDef(std::vector<SignatureDef> signature_defs) {
    signature_defs_ = std::move(signature_defs);
  }
// Enables preserving intermediates for debugging. Should only be set by
// InterpreterBuilder before allocating any tensors.
TfLiteStatus PreserveAllTensorsExperimental();
// A pure C data structure used to communicate with the pure C plugin
// interface. To avoid copying tensor metadata, this is also the definitive
// structure to store tensors.
// This is the primary subgraph context.
TfLiteContext* context_ = nullptr;
// The error reporter delegate that tflite will forward queries errors to.
ErrorReporter* error_reporter_ = nullptr;
// List of delegates that have been installed and are owned by this
// interpreter instance. Useful if client delegate ownership is burdensome.
// WARNING: This is an experimental API and subject to change.
// TODO(b/116667551): Use TfLiteExternalContext for storing state.
std::vector<
std::unique_ptr<TfLiteDelegate, std::function<void(TfLiteDelegate*)>>>
owned_delegates_;
// Profiler that has been installed and is owned by this interpreter instance.
// Useful if client profiler ownership is burdensome.
std::unique_ptr<Profiler> owned_profiler_;
// Points to the installed Profiler instance.
Profiler* installed_profiler_ = nullptr;
bool allow_buffer_handle_output_ = false;
// List of active external contexts.
TfLiteExternalContext* external_contexts_[kTfLiteMaxExternalContexts];
// The default external cpu backend context. After an TFLite interpreter is
// initialized, 'external_contexts_[kTfLiteCpuBackendContext]' is set to point
// to this object. However, if this element value is overwritten via calling
// 'SetExternalContext(kTfLiteCpuBackendContext, ...)', we will reset this to
// nullptr if necessary.
std::unique_ptr<ExternalCpuBackendContext> own_external_cpu_backend_context_;
// Subgraphs
std::vector<std::unique_ptr<Subgraph>> subgraphs_;
// A map of resources. Owned by interpreter and shared by multiple subgraphs.
resource::ResourceMap resources_;
// Indicating delegates that the TFLite interpreter will apply by default.
// An empty one means there's no delegate to be applied by default or
// delegates have been applied and doesn't need to be applied again.
std::vector<TfLiteDelegatePtr> lazy_delegate_providers_;
// List of signature def mapping inputs/output to tensor ids.
// We just keep track of tensor index.
std::vector<SignatureDef> signature_defs_;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_INTERPRETER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/interpreter.h | C++ | apache-2.0 | 34,728 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
/// \file
/// Provides functionality to construct an interpreter for a model.
///
#ifndef TENSORFLOW_LITE_INTERPRETER_BUILDER_H_
#define TENSORFLOW_LITE_INTERPRETER_BUILDER_H_
#include <memory>
#include <vector>
#include "flatbuffers/flatbuffers.h" // from @flatbuffers
#include "tensorflow/lite/allocation.h"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/core/api/error_reporter.h"
#include "tensorflow/lite/core/api/op_resolver.h"
#include "tensorflow/lite/core/subgraph.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/model_builder.h"
#include "tensorflow/lite/mutable_op_resolver.h"
#include "tensorflow/lite/schema/schema_generated.h"
#include "tensorflow/lite/stderr_reporter.h"
namespace tflite {
/// Build an interpreter capable of interpreting `model`.
///
/// `model`: A model whose lifetime must be at least as long as any
/// interpreter(s) created by the builder. In principle multiple interpreters
/// can be made from a single model.
/// `op_resolver`: An instance that implements the `OpResolver` interface, which
/// maps custom op names and builtin op codes to op registrations. The
/// lifetime of the provided `op_resolver` object must be at least as long as
/// the `InterpreterBuilder`; unlike `model` and `error_reporter`, the
/// `op_resolver` does not need to exist for the duration of any created
/// `Interpreter` objects.
/// `error_reporter`: a functor that is called to report errors and that
/// handles printf-style var-arg semantics. The lifetime of the
/// `error_reporter` object must be greater than or equal to the `Interpreter`
/// created by `operator()`.
///
/// Returns a kTfLiteOk when successful and sets interpreter to a valid
/// Interpreter. Note: The user must ensure the lifetime of the model (and error
/// reporter, if provided) is at least as long as interpreter's lifetime, and
/// a single model instance may safely be used with multiple interpreters.
class InterpreterBuilder {
 public:
  /// For this constructor, the ErrorReporter will be extracted from the
  /// FlatBufferModel.
  InterpreterBuilder(const FlatBufferModel& model,
                     const OpResolver& op_resolver);
  /// Builds an interpreter given only the raw flatbuffer Model object (instead
  /// of a FlatBufferModel). Mostly used for testing.
  /// If `error_reporter` is null, then DefaultErrorReporter() is used.
  InterpreterBuilder(const ::tflite::Model* model,
                     const OpResolver& op_resolver,
                     ErrorReporter* error_reporter = DefaultErrorReporter());
  ~InterpreterBuilder();
  // Non-copyable: the builder holds references/pointers to caller-owned state.
  InterpreterBuilder(const InterpreterBuilder&) = delete;
  InterpreterBuilder& operator=(const InterpreterBuilder&) = delete;
  /// Builds an interpreter and stores it in `*interpreter`.
  /// On success, returns kTfLiteOk and sets `*interpreter` to a valid
  /// Interpreter.
  /// On failure, returns an error status and sets `*interpreter` to nullptr.
  TfLiteStatus operator()(std::unique_ptr<Interpreter>* interpreter);
  /// Same as above, but also sets the number of CPU threads to use
  /// (overriding any previous call to SetNumThreads).
  /// Deprecated: use the SetNumThreads method instead.
  TfLiteStatus operator()(std::unique_ptr<Interpreter>* interpreter,
                          int num_threads);
  /// Sets the number of CPU threads to use for the interpreter.
  /// Returns kTfLiteOk on success, kTfLiteError on error.
  TfLiteStatus SetNumThreads(int num_threads);
  /// Enables preserving intermediates for debugging. Otherwise, by default
  /// intermediates are undefined due to memory planning and reuse.
  InterpreterBuilder& PreserveAllTensorsExperimental();
  /// Any delegates added with AddDelegate will be applied to the Interpreter
  /// generated by operator(), in the order that they were added. (The delegate
  /// parameter passed to AddDelegate should be non-null, otherwise an error
  /// will be reported, and the call to AddDelegate will have no other effect.)
  /// The lifetime of the delegate must be at least as long as the lifetime of
  /// any Interpreter generated by this InterpreterBuilder.
  /// WARNING: This is an experimental API and subject to change.
  void AddDelegate(TfLiteDelegate* delegate);
 private:
  // Resolves each op code in the model against op_resolver_ and fills
  // flatbuffer_op_index_to_registration_.
  TfLiteStatus BuildLocalIndexToRegistrationMapping();
  // Translates the flatbuffer `operators` list into nodes on `subgraph`.
  TfLiteStatus ParseNodes(
      const flatbuffers::Vector<flatbuffers::Offset<Operator>>* operators,
      Subgraph* subgraph);
  // Translates the flatbuffer `tensors` (backed by `buffers`) into tensors on
  // `subgraph`.
  TfLiteStatus ParseTensors(
      const flatbuffers::Vector<flatbuffers::Offset<Buffer>>* buffers,
      const flatbuffers::Vector<flatbuffers::Offset<Tensor>>* tensors,
      Subgraph* subgraph);
  // Applies the delegates registered via AddDelegate to `interpreter`.
  TfLiteStatus ApplyDelegates(Interpreter* interpreter);
  // Converts flatbuffer quantization parameters into `quantization` for a
  // tensor with dimensions `dims`.
  TfLiteStatus ParseQuantization(const QuantizationParameters* src_quantization,
                                 TfLiteQuantization* quantization,
                                 const std::vector<int>& dims);
  // Converts flatbuffer sparsity parameters into a newly produced
  // `*sparsity`.
  TfLiteStatus ParseSparsity(const SparsityParameters* src_sparsity,
                             TfLiteSparsity** sparsity);
  // Registers the model's SignatureDefs on `interpreter`.
  TfLiteStatus ParseSignatureDefs(
      const flatbuffers::Vector<flatbuffers::Offset<SignatureDef>>*
          signature_def_list,
      Interpreter* interpreter);
  // Not owned; must outlive this builder (see class comment above).
  const ::tflite::Model* model_;
  const OpResolver& op_resolver_;
  ErrorReporter* error_reporter_;
  // Delegates added via AddDelegate; not owned.
  std::vector<TfLiteDelegate*> delegates_;
  // Indexed by the model's op-code index; filled by
  // BuildLocalIndexToRegistrationMapping().
  std::vector<const TfLiteRegistration*> flatbuffer_op_index_to_registration_;
  std::vector<TfLiteRegistration> unresolved_custom_ops_;
  std::vector<BuiltinOperator> flatbuffer_op_index_to_registration_types_;
  const Allocation* allocation_ = nullptr;
  bool has_flex_op_ = false;
  int num_fp32_tensors_ = 0;
  // Set by PreserveAllTensorsExperimental().
  bool preserve_all_tensors_ = false;
  // Thread count to configure on built interpreters; -1 means "not set"
  // (leave the interpreter's default).
  int num_threads_ = -1;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_INTERPRETER_BUILDER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/interpreter_builder.h | C++ | apache-2.0 | 6,487 |
#ifndef TENSORFLOW_LITE_INTERPRETER_TEST_UTIL_H_
#define TENSORFLOW_LITE_INTERPRETER_TEST_UTIL_H_
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include <gtest/gtest.h>
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/string_util.h"
namespace tflite {
// InterpreterTest is a friend of Interpreter, so it can access context_.
class InterpreterTest : public ::testing::Test {
 public:
  // Forwards a smart-pointer delegate to the interpreter's ownership-taking
  // ModifyGraphWithDelegate overload. Static so tests can apply it to any
  // Interpreter instance, not just the fixture's.
  template <typename Delegate>
  static TfLiteStatus ModifyGraphWithDelegate(
      Interpreter* interpreter, std::unique_ptr<Delegate> delegate) {
    return interpreter->ModifyGraphWithDelegate(std::move(delegate));
  }
 protected:
  // Exposes the interpreter's private primary TfLiteContext to tests.
  TfLiteContext* GetInterpreterContext() { return interpreter_.context_; }
  // Grants tests mutable access to the interpreter's private
  // lazy_delegate_providers_ list.
  std::vector<Interpreter::TfLiteDelegatePtr>*
  mutable_lazy_delegate_providers() {
    return &interpreter_.lazy_delegate_providers_;
  }
  // True if any delegate has been applied to the interpreter.
  bool HasDelegates() { return interpreter_.HasDelegates(); }
  // Installs a single SignatureDef on the interpreter, built from the given
  // method name, signature key, and input/output name->tensor-index maps.
  void BuildSignature(const std::string& method_name, const std::string& key,
                      const std::map<std::string, uint32_t>& inputs,
                      const std::map<std::string, uint32_t>& outputs) {
    Interpreter::SignatureDef signature;
    signature.inputs = inputs;
    signature.outputs = outputs;
    signature.method_name = method_name;
    signature.signature_def_key = key;
    interpreter_.SetSignatureDef({signature});
  }
  // Interpreter under test; shared by all helpers above.
  Interpreter interpreter_;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_INTERPRETER_TEST_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/interpreter_test_util.h | C++ | apache-2.0 | 2,259 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_H_
#include <string>
namespace tflite {
// Returns the test id to use to retrieve the acceleration configuration
// in the acceleration allowlist.
std::string GetCurrentTestId();
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/acceleration_test_util.h | C++ | apache-2.0 | 1,050 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_INTERNAL_H_
#define TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_INTERNAL_H_
#include <algorithm>
#include <atomic>
#include <functional>
#include <iterator>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "re2/re2.h"
namespace tflite {
// Reads the acceleration configuration, handles comments and empty lines and
// the basic data conversion format (split into key, value, recognition of
// the line being a white or black list entry) and gives the data to the
// consumer to be inserted into the target collection.
void ReadAccelerationConfig(
const char* config,
const std::function<void(std::string, std::string, bool)>& consumer);
// Associates a regular-expression pattern over test ids with a configuration
// payload of type T, and records whether the entry denylists (rather than
// allowlists) the matching tests.
template <typename T>
class ConfigurationEntry {
 public:
  // `test_id_rex`: RE2 pattern matched against full test ids.
  // `test_config`: configuration payload returned for matching tests.
  // `is_denylist`: true if matching tests should be excluded.
  ConfigurationEntry(const std::string& test_id_rex, T test_config,
                     bool is_denylist)
      : test_id_rex_(test_id_rex),
        test_config_(test_config),
        is_denylist_(is_denylist) {}
  // Returns true iff `test_id` fully matches this entry's pattern.
  // Const-qualified (fix): matching does not mutate the entry, and this lets
  // callers invoke it through const references/containers without copying.
  bool Matches(const std::string& test_id) const {
    return RE2::FullMatch(test_id, test_id_rex_);
  }
  bool IsDenylistEntry() const { return is_denylist_; }
  const T& TestConfig() const { return test_config_; }
  const std::string& TestIdRex() const { return test_id_rex_; }
 private:
  std::string test_id_rex_;  // Pattern for test-id matching.
  T test_config_;            // Configuration payload.
  bool is_denylist_;         // True if this entry excludes tests.
};
// Returns the acceleration test configuration for the given test id and
// the given acceleration configuration type.
// The configuration type is responsible for providing the test configuration
// and the parse function to convert configuration lines into configuration
// objects.
//
// T must provide a static ParseConfigurationLine(value_str) factory and a
// static kAccelerationTestConfig raw configuration string (both used below).
template <typename T>
absl::optional<T> GetAccelerationTestParam(std::string test_id) {
  // Lazily-built, process-wide table of parsed configuration entries; never
  // deleted, so it lives for the remainder of the process.
  static std::atomic<std::vector<ConfigurationEntry<T>>*> test_config_ptr;
  if (test_config_ptr.load() == nullptr) {
    // NOTE: Several threads may race past the nullptr check and each build a
    // table; the exchange below keeps the last one and frees the one it
    // replaced, so no table leaks.
    auto config = new std::vector<ConfigurationEntry<T>>();
    auto consumer = [&config](std::string key, std::string value_str,
                              bool is_denylist) mutable {
      T value = T::ParseConfigurationLine(value_str);
      config->push_back(ConfigurationEntry<T>(key, value, is_denylist));
    };
    ReadAccelerationConfig(T::kAccelerationTestConfig, consumer);
    // Even if it has been already set, it would be just replaced with the
    // same value, just freeing the old value to avoid leaks
    auto* prev_val = test_config_ptr.exchange(config);
    delete prev_val;
  }
  const std::vector<ConfigurationEntry<T>>* test_config =
      test_config_ptr.load();
  // First entry whose pattern matches `test_id` wins (entries are copied by
  // value into the predicate).
  const auto test_config_iter = std::find_if(
      test_config->begin(), test_config->end(),
      [&test_id](ConfigurationEntry<T> elem) { return elem.Matches(test_id); });
  if (test_config_iter != test_config->end() &&
      !test_config_iter->IsDenylistEntry()) {
    return absl::optional<T>(test_config_iter->TestConfig());
  } else {
    // No match, or the matching entry denylists this test.
    return absl::optional<T>();
  }
}
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_ACCELERATION_TEST_UTIL_INTERNAL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/acceleration_test_util_internal.h | C++ | apache-2.0 | 3,700 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_BUILTIN_OP_KERNELS_H_
#define TENSORFLOW_LITE_KERNELS_BUILTIN_OP_KERNELS_H_
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace ops {
namespace builtin {
// Forward declaration of all builtin op kernel registration methods. These
// registrations are included with the standard `BuiltinOpResolver`.
//
// This header is particularly useful in cases where only a subset of ops are
// needed. In such cases, the client can selectively add only the registrations
// their model requires, using a custom `OpResolver` or `MutableOpResolver`.
// Selective registration in turn allows the linker to strip unused kernels.
//
// TODO(b/184734878): auto-generate this header file from the BuiltinOperator
// enum in the FlatBuffer schema.
// One registration function per builtin operator, listed in roughly
// alphabetical order. Each returns the TfLiteRegistration for that kernel;
// the definitions live in the per-op kernel translation units.
TfLiteRegistration* Register_ABS();
TfLiteRegistration* Register_ADD();
TfLiteRegistration* Register_ADD_N();
TfLiteRegistration* Register_ARG_MAX();
TfLiteRegistration* Register_ARG_MIN();
TfLiteRegistration* Register_AVERAGE_POOL_2D();
TfLiteRegistration* Register_BATCH_TO_SPACE_ND();
TfLiteRegistration* Register_BATCH_MATMUL();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_BIDIRECTIONAL_SEQUENCE_RNN();
TfLiteRegistration* Register_BROADCAST_TO();
TfLiteRegistration* Register_CALL_ONCE();
TfLiteRegistration* Register_CAST();
TfLiteRegistration* Register_CEIL();
TfLiteRegistration* Register_COMPLEX_ABS();
TfLiteRegistration* Register_CONCATENATION();
TfLiteRegistration* Register_CONV_2D();
TfLiteRegistration* Register_CONV_3D();
TfLiteRegistration* Register_COS();
TfLiteRegistration* Register_CUMSUM();
TfLiteRegistration* Register_DENSIFY();
TfLiteRegistration* Register_DEPTH_TO_SPACE();
TfLiteRegistration* Register_DEPTHWISE_CONV_2D();
TfLiteRegistration* Register_DEQUANTIZE();
TfLiteRegistration* Register_DIV();
TfLiteRegistration* Register_ELU();
TfLiteRegistration* Register_EMBEDDING_LOOKUP();
TfLiteRegistration* Register_EMBEDDING_LOOKUP_SPARSE();
TfLiteRegistration* Register_EQUAL();
TfLiteRegistration* Register_EXP();
TfLiteRegistration* Register_EXPAND_DIMS();
TfLiteRegistration* Register_FAKE_QUANT();
TfLiteRegistration* Register_FILL();
TfLiteRegistration* Register_FLOOR();
TfLiteRegistration* Register_FLOOR_DIV();
TfLiteRegistration* Register_FLOOR_MOD();
TfLiteRegistration* Register_FULLY_CONNECTED();
TfLiteRegistration* Register_GATHER();
TfLiteRegistration* Register_GATHER_ND();
TfLiteRegistration* Register_GREATER();
TfLiteRegistration* Register_GREATER_EQUAL();
TfLiteRegistration* Register_HARD_SWISH();
TfLiteRegistration* Register_HASHTABLE();
TfLiteRegistration* Register_HASHTABLE_FIND();
TfLiteRegistration* Register_HASHTABLE_LOOKUP();
TfLiteRegistration* Register_HASHTABLE_IMPORT();
TfLiteRegistration* Register_HASHTABLE_SIZE();
TfLiteRegistration* Register_IF();
TfLiteRegistration* Register_IMAG();
TfLiteRegistration* Register_L2_NORMALIZATION();
TfLiteRegistration* Register_L2_POOL_2D();
TfLiteRegistration* Register_LEAKY_RELU();
TfLiteRegistration* Register_LESS();
TfLiteRegistration* Register_LESS_EQUAL();
TfLiteRegistration* Register_LOCAL_RESPONSE_NORMALIZATION();
TfLiteRegistration* Register_LOG();
TfLiteRegistration* Register_LOGICAL_AND();
TfLiteRegistration* Register_LOGICAL_NOT();
TfLiteRegistration* Register_LOGICAL_OR();
TfLiteRegistration* Register_LOGISTIC();
TfLiteRegistration* Register_LOG_SOFTMAX();
TfLiteRegistration* Register_LSH_PROJECTION();
TfLiteRegistration* Register_LSTM();
TfLiteRegistration* Register_MATRIX_DIAG();
TfLiteRegistration* Register_MATRIX_SET_DIAG();
TfLiteRegistration* Register_MAXIMUM();
TfLiteRegistration* Register_MAX_POOL_2D();
TfLiteRegistration* Register_MEAN();
TfLiteRegistration* Register_MINIMUM();
TfLiteRegistration* Register_MIRROR_PAD();
TfLiteRegistration* Register_MUL();
TfLiteRegistration* Register_NEG();
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V4();
TfLiteRegistration* Register_NON_MAX_SUPPRESSION_V5();
TfLiteRegistration* Register_NOT_EQUAL();
TfLiteRegistration* Register_ONE_HOT();
TfLiteRegistration* Register_PACK();
TfLiteRegistration* Register_PAD();
TfLiteRegistration* Register_PADV2();
TfLiteRegistration* Register_POW();
TfLiteRegistration* Register_PRELU();
TfLiteRegistration* Register_QUANTIZE();
TfLiteRegistration* Register_RANGE();
TfLiteRegistration* Register_RANK();
TfLiteRegistration* Register_REAL();
TfLiteRegistration* Register_REDUCE_ALL();
TfLiteRegistration* Register_REDUCE_ANY();
TfLiteRegistration* Register_REDUCE_MAX();
TfLiteRegistration* Register_REDUCE_MIN();
TfLiteRegistration* Register_REDUCE_PROD();
TfLiteRegistration* Register_RELU();
TfLiteRegistration* Register_RELU6();
TfLiteRegistration* Register_RELU_N1_TO_1();
TfLiteRegistration* Register_RESHAPE();
TfLiteRegistration* Register_RESIZE_BILINEAR();
TfLiteRegistration* Register_RESIZE_NEAREST_NEIGHBOR();
TfLiteRegistration* Register_REVERSE_SEQUENCE();
TfLiteRegistration* Register_REVERSE_V2();
TfLiteRegistration* Register_RFFT2D();
TfLiteRegistration* Register_RNN();
TfLiteRegistration* Register_ROUND();
TfLiteRegistration* Register_RSQRT();
TfLiteRegistration* Register_SCATTER_ND();
TfLiteRegistration* Register_SEGMENT_SUM();
TfLiteRegistration* Register_SELECT();
TfLiteRegistration* Register_SELECT_V2();
TfLiteRegistration* Register_SHAPE();
TfLiteRegistration* Register_SIN();
TfLiteRegistration* Register_SKIP_GRAM();
TfLiteRegistration* Register_SLICE();
TfLiteRegistration* Register_SOFTMAX();
TfLiteRegistration* Register_SPACE_TO_BATCH_ND();
TfLiteRegistration* Register_SPACE_TO_DEPTH();
TfLiteRegistration* Register_SPARSE_TO_DENSE();
TfLiteRegistration* Register_SPLIT();
TfLiteRegistration* Register_SPLIT_V();
TfLiteRegistration* Register_SQRT();
TfLiteRegistration* Register_SQUARE();
TfLiteRegistration* Register_SQUARED_DIFFERENCE();
TfLiteRegistration* Register_SQUEEZE();
TfLiteRegistration* Register_STRIDED_SLICE();
TfLiteRegistration* Register_SUB();
TfLiteRegistration* Register_SUM();
TfLiteRegistration* Register_SVDF();
TfLiteRegistration* Register_TANH();
TfLiteRegistration* Register_TILE();
TfLiteRegistration* Register_TOPK_V2();
TfLiteRegistration* Register_TRANSPOSE();
TfLiteRegistration* Register_TRANSPOSE_CONV();
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_LSTM();
TfLiteRegistration* Register_UNIDIRECTIONAL_SEQUENCE_RNN();
TfLiteRegistration* Register_UNIQUE();
TfLiteRegistration* Register_UNPACK();
TfLiteRegistration* Register_WHERE();
TfLiteRegistration* Register_WHILE();
TfLiteRegistration* Register_ZEROS_LIKE();
} // namespace builtin
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_BUILTIN_OP_KERNELS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/builtin_op_kernels.h | C++ | apache-2.0 | 7,335 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_CONTEXT_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_CONTEXT_H_
#if (defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \
defined(_M_X64))
#define TFLITE_X86_PLATFORM
#endif
#include <memory>
#include "public/gemmlowp.h"
#include "ruy/context.h" // from @ruy
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/external_cpu_backend_context.h"
namespace tflite {
// Per-interpreter CPU backend state: owns the ruy and gemmlowp contexts used
// by cpu_backend_gemm, plus thread-count and caching configuration.
class CpuBackendContext final : public TfLiteInternalBackendContext {
 public:
  // Returns the CpuBackendContext associated with the given TfLiteContext.
  // (Defined in the .cc file; presumably creates/caches one as needed —
  // confirm against the implementation.)
  static CpuBackendContext* GetFromContext(TfLiteContext* context);
  CpuBackendContext();
  ~CpuBackendContext() override;
  // Accessor for the wrapped ruy context.
  ruy::Context* ruy_context() const { return ruy_context_.get(); }
  // Accessor for the wrapped gemmlowp context (legacy backend).
  gemmlowp::GemmContext* gemmlowp_context() const {
    return gemmlowp_context_.get();
  }
  // Sets the maximum-number-of-threads-to-use parameter, only as a means of
  // passing around this information.
  void SetMaxNumThreads(int max_num_threads) override;
  int max_num_threads() const { return max_num_threads_; }
  void SetUseCaching(bool flag);
  bool use_caching() const { return use_caching_; }
  // Releases ruy's cache of prepacked matrices (see use_caching_ below).
  void ClearCaches() override { ruy_context_->ClearPrepackedCache(); }
  // Gemmlowp on x86 is a deprecated path but some clients may still use
  // this path based on link time dependencies.
  bool PreferGemmlowpOnX86();
 private:
  // True when ruy can use AVX or a newer x86 SIMD extension (see CpuInfo).
  bool RuyHasAvxOrAbove();
  // Copy the wrapper class for cpuinfo from Ruy. Performs lazy, one-shot
  // detection of x86 SIMD features.
  class CpuInfo final {
   public:
    CpuInfo() {}
    ~CpuInfo();
    // X86 features
    bool Avx();
    bool Avx2Fma();
    bool Avx512();
   private:
    // Tracks whether cpuinfo detection has been attempted and its outcome.
    enum class InitStatus {
      kNotYetAttempted,
      kInitialized,
      kFailed,
    };
    InitStatus init_status_ = InitStatus::kNotYetAttempted;
    // Runs Initialize() on first call only; returns true when initialized.
    bool EnsureInitialized();
    InitStatus Initialize();
    CpuInfo(const CpuInfo&) = delete;
    CpuInfo& operator=(const CpuInfo&) = delete;
  };
  // To enable a smooth transition from the current direct usage
  // of the underlying gemmlowp context to going through abstractions
  // (see :cpu_backend_gemm), for now a CpuBackendContext always
  // stores both a gemmlowp context and a ruy context.
  // TODO(b/131416458): Once call sites all go through abstractions,
  // elide what can be elided based on TFLITE_WITH_RUY.
  const std::unique_ptr<ruy::Context> ruy_context_;
  const std::unique_ptr<gemmlowp::GemmContext> gemmlowp_context_;
  CpuInfo cpuinfo_;
  // The maximum of threads used for parallelizing TfLite ops. However,
  // cpu_backend_threadpool::Execute creates as many threads as it's
  // asked to, regardless of this. Typically a call site would query
  // cpu_backend_context->max_num_threads() and used that to determine
  // the number of tasks to create and to give to
  // cpu_backend_threadpool::Execute.
  //
  // This value also gets propagated to back-ends, where it plays the same
  // information-only role.
  int max_num_threads_;
  // For matrix muliplications with constants parameters (i.e. weights), we can
  // sometimes provide speedups by caching the "prepacked" data, for some
  // additional memory cost. This flag permits the user to route all
  // CpuBackendGem operations to a library that permits such an optimization
  // (currently the Ruy library only).
  bool use_caching_;
  CpuBackendContext(const CpuBackendContext&) = delete;
};
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_CONTEXT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_context.h | C++ | apache-2.0 | 4,096 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
#include <cstdint>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_custom_gemv.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_gemm_eigen.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_x86.h"
#endif
namespace tflite {
namespace cpu_backend_gemm {
// The main entry point for CpuBackendGemm::Gemm.
//
// If TFLITE_WITH_RUY is set, CpuBackendGemm::Gemm will always go to Ruy aka
// GemmImplUsingRuy. Other cases are as follows:
//
// |Quantized (uint8)|Quantized (int8)| Float |
// TFLITE_WITH_RUY | Ruy | Ruy | Ruy |
// !TFLITE_WITH_RUY | gemmlowp | Ruy/gemmlowp* | eigen |
// * - Ruy if NEON is not available.
// On x86 platforms:
// (default) | gemmlowp | Ruy | eigen |
// TFLITE_X86_RUY_\ | Ruy | Ruy | Ruy |
// ENABLED && (AVX
// or above available)
#if !defined(TFLITE_WITH_RUY) && defined(TFLITE_X86_PLATFORM)
/* GEMM dispatch implementation for x86.
 */
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplX86<LhsScalar, RhsScalar, AccumScalar,
                                      DstScalar, quantization_flavor> {};
#else
/* Generic implementation using ruy.
 * Non-ruy implementation will be partial specializations of this template.
 */
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl : detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar,
                                           DstScalar, quantization_flavor> {};
#if !defined(TFLITE_WITH_RUY)
/* Specializations using gemmlowp */
// Quantized path: same LHS/RHS scalar type, int32 accumulators.
template <typename SrcScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, DstScalar,
                quantization_flavor>
    : detail::GemmImplUsingGemmlowp<SrcScalar, SrcScalar, std::int32_t,
                                    DstScalar, quantization_flavor> {};
// When SrcScalar=int8 or DstScalar=int8, gemmlowp fails to compile
// outside of NEON. We avoid the compilation failure by subspecializing these
// cases, rerouting it back to ruy.
#if !defined(GEMMLOWP_NEON)
template <typename SrcScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
                quantization_flavor>
    : detail::GemmImplUsingRuy<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
                               quantization_flavor> {};
template <typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, DstScalar,
                quantization_flavor>
    : detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
                               DstScalar, quantization_flavor> {};
// Fully-int8 case: needed to disambiguate between the two partial
// specializations above, which would otherwise both match.
template <QuantizationFlavor quantization_flavor>
struct GemmImpl<std::int8_t, std::int8_t, std::int32_t, std::int8_t,
                quantization_flavor>
    : detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
                               std::int8_t, quantization_flavor> {};
#endif  // not GEMMLOWP_NEON
/* Specializations using Eigen */
template <>
struct GemmImpl<float, float, float, float, QuantizationFlavor::kFloatingPoint>
    : detail::GemmImplUsingEigen {};
#endif  // not TFLITE_WITH_RUY
#endif  // not TFLITE_WITH_RUY and TFLITE_X86_PLATFORM
/* Public entry point for CpuBackendGemm::Gemm.
 *
 * Validates the operands, then dispatches to:
 *  - ruy unconditionally, when caching is requested or when the storage
 *    orders are not (row-major LHS) x (col-major RHS) -> (col-major dst);
 *  - a custom GEMV kernel, for matrix*column-vector shapes it supports;
 *  - the platform-selected GemmImpl backend otherwise.
 */
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
          const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
          const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
          const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
          CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
  ValidateParams(lhs_params, rhs_params, dst_params, params);
  if (!IsValidGemm(lhs_params, rhs_params, dst_params)) {
    // For now, assert in debug mode, return in opt.
    // TODO(b/183099395) Eliminate debug/release discrepancy by plumbing in
    // TFLiteStatus so we can return an error here.
    TFLITE_DCHECK(false);
    return;
  }
  // Ruy must be used, overriding the platform default, in two situations:
  //  - prepacked-matrix caching is requested: only ruy implements it, and
  //    its performance impact is large where it is typically used;
  //  - any operand deviates from the canonical storage orders: ruy handles
  //    all 2^3 = 8 order combinations as a runtime switch with comparable
  //    performance, whereas gemmlowp/Eigen would need up to an 8-fold
  //    code-size increase to cover them as template parameters.
  const bool force_ruy = context->use_caching() ||
                         lhs_params.order != Order::kRowMajor ||
                         rhs_params.order != Order::kColMajor ||
                         dst_params.order != Order::kColMajor;
  if (force_ruy) {
    detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                             quantization_flavor>::Run(lhs_params, lhs_data,
                                                       rhs_params, rhs_data,
                                                       dst_params, dst_data,
                                                       params, context);
    return;
  }
  // Matrix * column-vector: give the custom fast GEMV paths first refusal.
  // CustomGemv returns true iff it actually handled the operation.
  if (dst_params.cols == 1 &&
      detail::CustomGemv(lhs_params, lhs_data, rhs_params, rhs_data,
                         dst_params, dst_data, params, context)) {
    return;
  }
  // General case: dispatch to the platform-selected backend.
  GemmImpl<LhsScalar, RhsScalar, AccumScalar, DstScalar,
           quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,
                                     dst_params, dst_data, params, context);
}
// Special path for gemm with raw accumulator case. i.e. AccumScalar ==
// DstScalar == int32 case. Note: unlike the primary overload above, this
// path performs no IsValidGemm check and never consults the custom GEMV or
// non-ruy backends.
template <typename LhsScalar, typename RhsScalar,
          QuantizationFlavor quantization_flavor>
void Gemm(const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
          const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
          const MatrixParams<int32_t>& dst_params, int32_t* dst_data,
          const GemmParams<int32_t, int32_t, quantization_flavor>& params,
          CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm");
  ValidateParams(lhs_params, rhs_params, dst_params, params);
  // Currently, only Ruy backend supports get raw accumulator, so we use ruy
  // only.
  ruy::profiler::ScopeLabel label2("cpu_backend_gemm::Gemm: general GEMM");
  detail::GemmImplUsingRuy<LhsScalar, RhsScalar, int32_t, int32_t,
                           quantization_flavor>::Run(lhs_params, lhs_data,
                                                     rhs_params, rhs_data,
                                                     dst_params, dst_data,
                                                     params, context);
}
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm.h | C++ | apache-2.0 | 8,970 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Fast Gemv (i.e. matrix*vector multiplication) paths.
// TODO(b/132094390): remove when GEMM performance is good enough on GEMV cases.
// TFLite's runtime ops concentrate as much as possible the matrix*vector
// use cases on the (matrix) * (column-vector) case, as opposed to
// (row-vector) * (matrix). So that is what we focus on optimizing here.
// Accordingly, the public cpu_backend_gemm::Gemm() entry point checks
// if we are in this (matrix) * (column-vector) case, and if so calls
// CustomGemv.
//
// cpu_backend_gemm::Gemm is also currently restricted (as enforced in
// ValidateParams) to the case where the left-hand side matrix is row-major.
//
// So the current scope of this CustomGemv function really is:
// (row-major matrix) * (column-vector).
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_CUSTOM_GEMV_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_CUSTOM_GEMV_H_
#include <stdint.h>
#include <algorithm>
#include <type_traits>
#include <vector>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// CustomGemvImpl is what needs to be specialized for each custom GEMV path.
//
// It does not deal with any multi-threaded implementation detail. Rather,
// it provides the single-thread implementation to be run by each thread.
//
// This primary template is a non-functional placeholder:
// IsSupportedGivenSufficientlyManyRows always returns false and Run is a
// no-op, so unspecialized scalar combinations fall through to the general
// GEMM path. Real kernels are provided as partial specializations below.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct CustomGemvImpl {
  // The number of rows of the left-hand-side matrix (and equivalently of the
  // destination column-vector) that the kernel processes at a time.
  // This will also be the minimum required number of rows for a Gemv shape
  // to be supported by this path.
  //
  // Gemv implementations are expected to be able to deal with numbers of
  // rows that aren't multiples of kKernelRows by possibly running the kernel
  // again at an odd row_start, e.g. if kKernelRows==4, Run() should still
  // support running on 7 rows by running twice: once with row_start=0 and then
  // another time with row_start=3.
  //
  // On the other hand, gemv implementations are not expected to support
  // running on fewer than kKernelRows rows. There is no interest in
  // optimizing such narrow Gemv's that they are just a few dot-products.
  // Supporting that would require custom kernel code only for that case.
  static constexpr int kKernelRows = 1;
  // Returns true if the Gemv shape is supported by Run(), provided that
  // (row_end - row_start) > kKernelRows.
  static bool IsSupportedGivenSufficientlyManyRows(
      const MatrixParams<LhsScalar>& lhs_params,
      const MatrixParams<RhsScalar>& rhs_params,
      const MatrixParams<DstScalar>& dst_params,
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params) {
    return false;
  }
  // Performs the Gemv on rows [row_start, row_end). No-op in this
  // placeholder primary template.
  static void Run(
      const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
      const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
      int row_start, int row_end) {}
};
// Wraps CustomGemvImpl for multi-threaded operation: each task runs the
// single-thread kernel over its assigned [row_start, row_end) row range.
//
// NOTE: the task stores *references* to the caller-owned parameter structs;
// tasks must not outlive the CustomGemv call that created them.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
class CustomGemvTask : public cpu_backend_threadpool::Task {
 public:
  // Captures the operands and the row range this task is responsible for.
  CustomGemvTask(
      const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
      const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
      int row_start, int row_end)
      : lhs_params_(lhs_params),
        lhs_data_(lhs_data),
        rhs_params_(rhs_params),
        rhs_data_(rhs_data),
        dst_params_(dst_params),
        dst_data_(dst_data),
        params_(params),
        row_start_(row_start),
        row_end_(row_end) {}
  // Invoked by the threadpool: runs the single-thread kernel on this task's
  // row range.
  void Run() override {
    using Impl = CustomGemvImpl<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                                quantization_flavor>;
    Impl::Run(lhs_params_, lhs_data_, rhs_params_, rhs_data_, dst_params_,
              dst_data_, params_, row_start_, row_end_);
  }
 private:
  const MatrixParams<LhsScalar>& lhs_params_;
  const LhsScalar* lhs_data_;
  const MatrixParams<RhsScalar>& rhs_params_;
  const RhsScalar* rhs_data_;
  const MatrixParams<DstScalar>& dst_params_;
  DstScalar* dst_data_;
  const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params_;
  int row_start_;
  int row_end_;
};
// Either performs the requested Gemv operation and returns true,
// or immediately returns false if no custom kernel supports it.
//
// See the comment at the top of the file for the scope of what this handles.
// In summary: (row-major matrix) * (column-vector).
//
// This function only contains the high-level dispatch and multi-threading
// logic; the actual arithmetic lives in CustomGemvImpl specializations.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
bool CustomGemv(
    const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
    const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
    const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
    const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
    CpuBackendContext* context) {
  ruy::profiler::ScopeLabel label("cpu_backend_gemm::Gemm: CustomGemv");
  using Impl = CustomGemvImpl<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                              quantization_flavor>;
  // Refuse shapes with too few rows for the kernel, and any shape the
  // selected kernel declares unsupported.
  if (lhs_params.rows < Impl::kKernelRows ||
      !Impl::IsSupportedGivenSufficientlyManyRows(lhs_params, rhs_params,
                                                  dst_params, params)) {
    return false;
  }
  TFLITE_DCHECK_GE(lhs_params.rows, Impl::kKernelRows);
  const int num_threads = LegacyHowManyThreads<Impl::kKernelRows>(
      context->max_num_threads(), dst_params.rows, dst_params.cols,
      lhs_params.cols);
  if (num_threads == 1) {
    // Single-threaded: run the kernel inline over all rows.
    Impl::Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
              params, 0, lhs_params.rows);
    return true;
  }
  // Multi-threaded: carve the destination rows into contiguous chunks, one
  // task per thread, each chunk a multiple of kKernelRows (except possibly
  // the last, clamped to the matrix height).
  using Task = CustomGemvTask<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                              quantization_flavor>;
  std::vector<Task> tasks;
  tasks.reserve(num_threads);
  const int rows_per_task =
      RoundUp<Impl::kKernelRows>(CeilQuotient(dst_params.rows, num_threads));
  int next_row = 0;
  for (int i = 0; i < num_threads; i++) {
    const int task_end_row = std::min(dst_params.rows, next_row + rows_per_task);
    tasks.emplace_back(lhs_params, lhs_data, rhs_params, rhs_data, dst_params,
                       dst_data, params, next_row, task_end_row);
    next_row = task_end_row;
  }
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), context);
  return true;
}
// USE_NEON still allows for x86 where we may be using the arm_neon_sse.h
// wrapper implementing NEON intrinsics on top of SSE4 intrinsics.
#ifdef USE_NEON
// Some NEON helper functions used by CustomGemvImpl specializations below,
// allowing for some type genericity in them.
// Loads 16 uint8 values from `src`, widens them to int16 and subtracts the
// zero point, returning two int16x8 vectors (low half in val[0], high half
// in val[1]).
inline int16x8x2_t Load16AndSubtractZeroPoint(const std::uint8_t* src,
                                              std::uint8_t zero_point) {
  uint8x16_t src_u8 = vld1q_u8(src);
  int16x8_t src_s16_0 = vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(src_u8)));
  int16x8_t src_s16_1 = vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(src_u8)));
  int16x8x2_t result;
  int16x8_t zero_point_vec = vdupq_n_s16(zero_point);
  result.val[0] = vsubq_s16(src_s16_0, zero_point_vec);
  result.val[1] = vsubq_s16(src_s16_1, zero_point_vec);
  return result;
}
// Same operation as the overload above, for 16 int8 source values
// (sign-extending widen instead of zero-extending).
inline int16x8x2_t Load16AndSubtractZeroPoint(const std::int8_t* src,
                                              std::int8_t zero_point) {
  int8x16_t src_s8 = vld1q_s8(src);
  int16x8_t src_s16_0 = vmovl_s8(vget_low_s8(src_s8));
  int16x8_t src_s16_1 = vmovl_s8(vget_high_s8(src_s8));
  int16x8x2_t result;
  int16x8_t zero_point_vec = vdupq_n_s16(zero_point);
  result.val[0] = vsubq_s16(src_s16_0, zero_point_vec);
  result.val[1] = vsubq_s16(src_s16_1, zero_point_vec);
  return result;
}
// 8-element variant: loads 8 uint8 values, widens to int16 and subtracts the
// zero point.
inline int16x8_t Load8AndSubtractZeroPoint(const std::uint8_t* src,
                                           std::uint8_t zero_point) {
  uint8x8_t src_u8 = vld1_u8(src);
  int16x8_t src_s16 = vreinterpretq_s16_u16(vmovl_u8(src_u8));
  int16x8_t zero_point_vec = vdupq_n_s16(zero_point);
  return vsubq_s16(src_s16, zero_point_vec);
}
// 8-element variant for int8 source values.
inline int16x8_t Load8AndSubtractZeroPoint(const std::int8_t* src,
                                           std::int8_t zero_point) {
  int8x8_t src_s8 = vld1_s8(src);
  int16x8_t src_s16 = vmovl_s8(src_s8);
  int16x8_t zero_point_vec = vdupq_n_s16(zero_point);
  return vsubq_s16(src_s16, zero_point_vec);
}
// Saturates 4 int32 accumulators down to uint8, clamps them to
// [clamp_min, clamp_max], and stores the 4 results to `dst`.
inline void ClampAndStore(int32x4_t src, std::uint8_t clamp_min,
                          std::uint8_t clamp_max, std::uint8_t* dst) {
  // Narrow values down to 16 bit signed.
  const int16x4_t res16 = vqmovn_s32(src);
  // Narrow values down to 8 bit unsigned, saturating.
  uint8x8_t res8 = vqmovun_s16(vcombine_s16(res16, res16));
  // Apply the clamping from the activation function
  res8 = vmax_u8(res8, vdup_n_u8(clamp_min));
  res8 = vmin_u8(res8, vdup_n_u8(clamp_max));
  // Store results to destination.
  vst1_lane_u8(dst + 0, res8, 0);
  vst1_lane_u8(dst + 1, res8, 1);
  vst1_lane_u8(dst + 2, res8, 2);
  vst1_lane_u8(dst + 3, res8, 3);
}
// int8 destination variant.
inline void ClampAndStore(int32x4_t src, std::int8_t clamp_min,
                          std::int8_t clamp_max, std::int8_t* dst) {
  // Narrow values down to 16 bit signed.
  const int16x4_t res16 = vqmovn_s32(src);
  // Narrow values down to 8 bit signed, saturating.
  int8x8_t res8 = vqmovn_s16(vcombine_s16(res16, res16));
  // Apply the clamping from the activation function
  res8 = vmax_s8(res8, vdup_n_s8(clamp_min));
  res8 = vmin_s8(res8, vdup_n_s8(clamp_max));
  // Store results to destination.
  vst1_lane_s8(dst + 0, res8, 0);
  vst1_lane_s8(dst + 1, res8, 1);
  vst1_lane_s8(dst + 2, res8, 2);
  vst1_lane_s8(dst + 3, res8, 3);
}
// int16 destination variant (only one narrowing step needed).
inline void ClampAndStore(int32x4_t src, std::int16_t clamp_min,
                          std::int16_t clamp_max, std::int16_t* dst) {
  // Narrow values down to 16 bit signed.
  int16x4_t res16 = vqmovn_s32(src);
  // Apply the clamping from the activation function
  res16 = vmax_s16(res16, vdup_n_s16(clamp_min));
  res16 = vmin_s16(res16, vdup_n_s16(clamp_max));
  // Store results to destination.
  vst1_lane_s16(dst + 0, res16, 0);
  vst1_lane_s16(dst + 1, res16, 1);
  vst1_lane_s16(dst + 2, res16, 2);
  vst1_lane_s16(dst + 3, res16, 3);
}
template <typename LhsScalar, typename RhsScalar, typename DstScalar,
QuantizationFlavor quantization_flavor>
struct CustomGemvImpl<LhsScalar, RhsScalar, std::int32_t, DstScalar,
quantization_flavor> {
// This partial template specialization is less generic than its declaration
// implies: it assumes the following constraints on its free template
// parameters. We guard these assumptions in the following static_assert's.
static_assert(std::is_same<LhsScalar, std::uint8_t>::value ||
std::is_same<LhsScalar, std::int8_t>::value,
"");
static_assert(std::is_same<RhsScalar, std::uint8_t>::value ||
std::is_same<RhsScalar, std::int8_t>::value,
"");
static_assert(std::is_same<DstScalar, std::uint8_t>::value ||
std::is_same<DstScalar, std::int8_t>::value ||
std::is_same<DstScalar, std::int16_t>::value,
"");
static_assert(quantization_flavor ==
QuantizationFlavor::kIntegerWithUniformMultiplier ||
quantization_flavor ==
QuantizationFlavor::kIntegerWithPerRowMultiplier,
"");
// This implementation's inner loop processes 4 rows of the left-hand side
// matrix at a time.
static constexpr int kKernelRows = 4;
static bool IsSupportedGivenSufficientlyManyRows(
const MatrixParams<LhsScalar>& lhs_params,
const MatrixParams<RhsScalar>& rhs_params,
const MatrixParams<DstScalar>& dst_params,
const GemmParams<std::int32_t, DstScalar, quantization_flavor>& params) {
// The kernel processes at least 8 LHS columns at once to fill NEON
// registers. The leftovers-handling code at the end works by loading a
// partially overlapping final register by walking back by a few (<8) values
// to avoid running past the row's end. This relies on there being
// at least 8 LHS columns.
return lhs_params.cols >= 8;
}
  // Computes rows [row_start, row_end) of dst = lhs * rhs for the quantized
  // case: int32 accumulation, optional bias addition, fixed-point-multiplier
  // rescaling, destination zero-point offset, clamping, then store.
  // Caller contract: row_end - row_start >= kKernelRows, and
  // IsSupportedGivenSufficientlyManyRows() returned true for these shapes
  // (lhs_params.cols >= 8), which the overlapping tail load below relies on.
  static void Run(
      const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
      const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<std::int32_t, DstScalar, quantization_flavor>& params,
      int row_start, int row_end) {
    // Handle kKernelRows ( == 4) rows of the left-hand side matrix at each
    // iteration of this for loop.
    TFLITE_DCHECK_GE(row_end - row_start, kKernelRows)
    for (int row = row_start; row < row_end; row += kKernelRows) {
      // Here is the magic where we allow this kernel to handle any odd number
      // of rows as long as it's >= kKernelRows: the last group of `kKernelRows`
      // rows will be nudged to fit, possibly by starting at an odd value of
      // `row`.
      row = std::min(row, row_end - kKernelRows);
      // First of the kKernelRows LHS (filter) rows handled by this iteration.
      const LhsScalar* filter_ptr = lhs_data + row * lhs_params.cols;
      static constexpr int kCacheLineSize = 64;
      // Prefetch the whole RHS (input) vector into L1 with 'keep' locality:
      // it is re-read by every group of kKernelRows rows.
      for (int k = 0; k < rhs_params.rows;
           k += kCacheLineSize / sizeof(RhsScalar)) {
        optimized_ops_preload_l1_keep(rhs_data + k);
      }
      // kPreloadAhead is empirically determined.
      // End-to-end latency (ms) on mobilenet_v2_0.35_96_8bit, 1 thread,
      // on Qualcomm S855:
      //
      // kPreloadAhead | big core | little core
      // --------------+----------+------------
      // 64            | 1.26     | 5.45
      // 128           | 1.23     | 5.01
      // 256           | 1.18     | 4.9
      // 512           | 1.18     | 5.45
      // 1024          | 1.18     | 6.5
      // no prefetch   | 1.25     | 8.1
      static constexpr int kPreloadAhead = 256;
      // 4 accumulator registers, one for each row being processed.
      // Each has 4 int32 lanes that corresponds to columns modulo 4, and
      // will need to be horizontally reduced at the end.
      int32x4_t acc0 = vdupq_n_s32(0);
      int32x4_t acc1 = acc0;
      int32x4_t acc2 = acc0;
      int32x4_t acc3 = acc0;
      int in = 0;
      // As much as possible, handle 16 columns of the left-hand side matrix
      // at a time. This allows for decent NEON implementation.
      for (; in <= lhs_params.cols - 16; in += 16) {
        const LhsScalar* local_filter_ptr = filter_ptr;
        // Load 16 values, widen to int16 and subtract the zero point.
        int16x8x2_t input_val =
            Load16AndSubtractZeroPoint(rhs_data + in, rhs_params.zero_point);
        int16x8x2_t filter_val_0 =
            Load16AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(LhsScalar));
        local_filter_ptr += lhs_params.cols;
        int16x8x2_t filter_val_1 =
            Load16AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(LhsScalar));
        local_filter_ptr += lhs_params.cols;
        int16x8x2_t filter_val_2 =
            Load16AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(LhsScalar));
        local_filter_ptr += lhs_params.cols;
        int16x8x2_t filter_val_3 =
            Load16AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(LhsScalar));
        filter_ptr += 16;
        // Widening multiply-accumulate: int16 x int16 lanes into int32 lanes.
        acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0.val[0]),
                         vget_low_s16(input_val.val[0]));
        acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1.val[0]),
                         vget_low_s16(input_val.val[0]));
        acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2.val[0]),
                         vget_low_s16(input_val.val[0]));
        acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3.val[0]),
                         vget_low_s16(input_val.val[0]));
        acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0.val[1]),
                         vget_low_s16(input_val.val[1]));
        acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1.val[1]),
                         vget_low_s16(input_val.val[1]));
        acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2.val[1]),
                         vget_low_s16(input_val.val[1]));
        acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3.val[1]),
                         vget_low_s16(input_val.val[1]));
        acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0.val[0]),
                         vget_high_s16(input_val.val[0]));
        acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1.val[0]),
                         vget_high_s16(input_val.val[0]));
        acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2.val[0]),
                         vget_high_s16(input_val.val[0]));
        acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3.val[0]),
                         vget_high_s16(input_val.val[0]));
        acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0.val[1]),
                         vget_high_s16(input_val.val[1]));
        acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1.val[1]),
                         vget_high_s16(input_val.val[1]));
        acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2.val[1]),
                         vget_high_s16(input_val.val[1]));
        acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3.val[1]),
                         vget_high_s16(input_val.val[1]));
      }
      // Less than 16 values remain. Try to handle 8 more.
      if (in <= lhs_params.cols - 8) {
        int16x8_t input_val =
            Load8AndSubtractZeroPoint(rhs_data + in, rhs_params.zero_point);
        int16x8_t filter_val_0 = Load8AndSubtractZeroPoint(
            filter_ptr + 0 * lhs_params.cols, lhs_params.zero_point);
        int16x8_t filter_val_1 = Load8AndSubtractZeroPoint(
            filter_ptr + 1 * lhs_params.cols, lhs_params.zero_point);
        int16x8_t filter_val_2 = Load8AndSubtractZeroPoint(
            filter_ptr + 2 * lhs_params.cols, lhs_params.zero_point);
        int16x8_t filter_val_3 = Load8AndSubtractZeroPoint(
            filter_ptr + 3 * lhs_params.cols, lhs_params.zero_point);
        filter_ptr += 8;
        acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0),
                         vget_low_s16(input_val));
        acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1),
                         vget_low_s16(input_val));
        acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2),
                         vget_low_s16(input_val));
        acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3),
                         vget_low_s16(input_val));
        acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0),
                         vget_high_s16(input_val));
        acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1),
                         vget_high_s16(input_val));
        acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2),
                         vget_high_s16(input_val));
        acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3),
                         vget_high_s16(input_val));
        in += 8;
      }
      // Less than 8 values remain. Handle the remaining values
      // in one more copy of the above code handling 8, where we
      // walk back a few values to be able to load 8 values without
      // overrunning the buffer. This is where we make use of the requirement
      // (see IsSupportedGivenSufficientlyManyRows) that there are at least
      // 8 LHS columns.
      if (in < lhs_params.cols) {
        // `back` is how many entries to walk back by.
        // Its value is necessarily between 1 and 7.
        const int back = in + 8 - lhs_params.cols;
        TFLITE_DCHECK_GE(back, 1);
        TFLITE_DCHECK_LE(back, 7);
        // Load 8 values as usual.
        int16x8_t input_val = Load8AndSubtractZeroPoint(
            rhs_data + lhs_params.cols - 8, rhs_params.zero_point);
        const LhsScalar* local_filter_ptr = filter_ptr - back;
        filter_ptr += lhs_params.cols - in;
        int16x8_t filter_val_0 =
            Load8AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        local_filter_ptr += lhs_params.cols;
        int16x8_t filter_val_1 =
            Load8AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        local_filter_ptr += lhs_params.cols;
        int16x8_t filter_val_2 =
            Load8AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        local_filter_ptr += lhs_params.cols;
        int16x8_t filter_val_3 =
            Load8AndSubtractZeroPoint(local_filter_ptr, lhs_params.zero_point);
        // Now zero out the `back` first entries of input_val.
        // vsetq_lane_s16 takes a literal index, so we need unrolled code.
        // Each case intentionally falls through to zero all lanes below it.
        switch (back) {
          case 7:
            input_val = vsetq_lane_s16(0, input_val, 6);
            [[clang::fallthrough]];
          case 6:
            input_val = vsetq_lane_s16(0, input_val, 5);
            [[clang::fallthrough]];
          case 5:
            input_val = vsetq_lane_s16(0, input_val, 4);
            [[clang::fallthrough]];
          case 4:
            input_val = vsetq_lane_s16(0, input_val, 3);
            [[clang::fallthrough]];
          case 3:
            input_val = vsetq_lane_s16(0, input_val, 2);
            [[clang::fallthrough]];
          case 2:
            input_val = vsetq_lane_s16(0, input_val, 1);
            [[clang::fallthrough]];
          default:
            input_val = vsetq_lane_s16(0, input_val, 0);
        }
        // Multiply-accumulate 8 values as usual. The `back` first lanes
        // of filter_val_* are junk, but it doesn't matter since they get
        // multiplied by the zeros that we just wrote in the corresponding
        // lanes of input_val.
        acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0),
                         vget_low_s16(input_val));
        acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1),
                         vget_low_s16(input_val));
        acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2),
                         vget_low_s16(input_val));
        acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3),
                         vget_low_s16(input_val));
        acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0),
                         vget_high_s16(input_val));
        acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1),
                         vget_high_s16(input_val));
        acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2),
                         vget_high_s16(input_val));
        acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3),
                         vget_high_s16(input_val));
      }
      // Horizontally reduce accumulators
      int32x2_t pairwise_reduced_acc_0 =
          vpadd_s32(vget_low_s32(acc0), vget_high_s32(acc0));
      int32x2_t pairwise_reduced_acc_1 =
          vpadd_s32(vget_low_s32(acc1), vget_high_s32(acc1));
      int32x2_t pairwise_reduced_acc_2 =
          vpadd_s32(vget_low_s32(acc2), vget_high_s32(acc2));
      int32x2_t pairwise_reduced_acc_3 =
          vpadd_s32(vget_low_s32(acc3), vget_high_s32(acc3));
      const int32x2_t reduced_lo =
          vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
      const int32x2_t reduced_hi =
          vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
      int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
      // End of horizontal reduction: now `reduced` is a single int32x4
      // containing the 4 int32 accumulators corresponding to the 4 rows
      // being processed.
      // Add bias values.
      if (params.bias) {
        int32x4_t bias_vec = vld1q_s32(params.bias + row);
        reduced = vaddq_s32(reduced, bias_vec);
      }
      // Get multiplier parameters.
      int32x4_t multiplier_fixedpoint;
      int32x4_t multiplier_exponent;
      if (quantization_flavor ==
          QuantizationFlavor::kIntegerWithPerRowMultiplier) {
        multiplier_exponent =
            vld1q_s32(params.multiplier_exponent_perchannel + row);
        multiplier_fixedpoint =
            vld1q_s32(params.multiplier_fixedpoint_perchannel + row);
      } else {
        multiplier_exponent = vdupq_n_s32(params.multiplier_exponent);
        multiplier_fixedpoint = vdupq_n_s32(params.multiplier_fixedpoint);
      }
      // If positive exponent, shift left.
      int32x4_t exponent_positive_part =
          vmaxq_s32(multiplier_exponent, vdupq_n_s32(0));
      reduced = vshlq_s32(reduced, exponent_positive_part);
      // Multiply by the fixed-point multiplier.
      reduced = vqrdmulhq_s32(reduced, multiplier_fixedpoint);
      // If negative exponent, rounding-shift-right.
      int32x4_t exponent_negative_part =
          vminq_s32(multiplier_exponent, vdupq_n_s32(0));
      reduced = vrshlq_s32(reduced, exponent_negative_part);
      // Add the output offset.
      const int32x4_t output_offset_vec = vdupq_n_s32(dst_params.zero_point);
      reduced = vaddq_s32(reduced, output_offset_vec);
      // Finally, clamp and store to the destination.
      ClampAndStore(reduced, params.clamp_min, params.clamp_max,
                    dst_data + row);
    }
  }
};
// The float specialization below is unconditionally faster than ruy
// because ruy does not currently have any Gemv path.
// But it is not unconditionally faster than Eigen, which is what is used
// unless TFLITE_WITH_RUY is defined. Indeed, Eigen has decently efficient
// Gemv paths, and they may use AVX instructions, while the present
// NEON intrinsics code maps at best to SSE4 on x86.
#ifdef TFLITE_WITH_RUY
// We want to use fused multiply-add when it's available (that is, on A64
// unconditionally and on A32 with VFPv4) because it's often faster, and
// because non-fused seems not to be available in A64 so a conscientious
// compiler might emit slow code (separate mul and add instructions) in order to
// implement the vmlaq_f32 intrinsic with strict bit-for-bit exactness on A64.
// (Compilers seem to be generating a fused fmla instruction at the moment,
// but that could change).
//
// We still want to support building for A32 without VFPv4.
// Multiply-accumulate helper: returns acc + lhs * rhs, elementwise over the
// 4 float lanes. Uses the fused vfmaq_f32 when FMA is available, and the
// non-fused vmlaq_f32 otherwise (see the rationale in the comment above).
inline float32x4_t mul_add(float32x4_t acc, float32x4_t lhs, float32x4_t rhs) {
#ifdef __ARM_FEATURE_FMA
  return vfmaq_f32(acc, lhs, rhs);
#else
  return vmlaq_f32(acc, lhs, rhs);
#endif
}
// Float specialization of CustomGemvImpl: NEON GEMV for float32,
// processing 4 LHS rows and 4 columns per inner-loop iteration.
template <>
struct CustomGemvImpl<float, float, float, float,
                      QuantizationFlavor::kFloatingPoint> {
  // This implementation's inner loop processes 4 rows of the left-hand side
  // matrix at a time.
  static constexpr int kKernelRows = 4;
  static bool IsSupportedGivenSufficientlyManyRows(
      const MatrixParams<float>& lhs_params,
      const MatrixParams<float>& rhs_params,
      const MatrixParams<float>& dst_params,
      const GemmParams<float, float>& params) {
    // The kernel processes 4 LHS columns at once to fill float32x4 registers.
    // The leftovers-handling code at the end works by loading a partially
    // overlapping final register by walking back by a few (<4) floats
    // to avoid running past the row's end. This relies on there being
    // at least 4 LHS columns.
    return lhs_params.cols >= 4;
  }
  // Computes rows [row_start, row_end) of dst = lhs * rhs, then adds the
  // optional bias and clamps to [params.clamp_min, params.clamp_max].
  // Caller contract: row_end - row_start >= kKernelRows and
  // IsSupportedGivenSufficientlyManyRows() returned true for these shapes.
  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
                  const MatrixParams<float>& rhs_params, const float* rhs_data,
                  const MatrixParams<float>& dst_params, float* dst_data,
                  const GemmParams<float, float>& params, int row_start,
                  int row_end) {
    // Handle kKernelRows ( == 4) rows of the left-hand side matrix at each
    // iteration of this for loop.
    TFLITE_DCHECK_GE(row_end - row_start, kKernelRows);
    for (int row = row_start; row < row_end; row += kKernelRows) {
      // Here is the magic where we allow this kernel to handle any odd number
      // of rows as long as it's >= kKernelRows: the last group of `kKernelRows`
      // rows will be nudged to fit, possibly by starting at an odd value of
      // `row`.
      row = std::min(row, row_end - kKernelRows);
      // First of the kKernelRows LHS (filter) rows handled by this iteration.
      const float* filter_ptr = lhs_data + row * lhs_params.cols;
      static constexpr int kCacheLineSize = 64;
      // Prefetch the whole RHS (input) vector into L1 with 'keep' locality:
      // it is re-read by every group of kKernelRows rows.
      for (int k = 0; k < rhs_params.rows;
           k += kCacheLineSize / sizeof(float)) {
        optimized_ops_preload_l1_keep(rhs_data + k);
      }
      // kPreloadAhead is empirically determined.
      // End-to-end latency (ms) on mobilenet_v2_0.35_96_float, 1 thread,
      // on Qualcomm S855:
      //
      // kPreloadAhead | big core | little core
      // --------------+----------+------------
      // 64            | 2.4      | 15.2
      // 128           | 2.15     | 12.9
      // 256           | 2        | 12.9
      // 512           | 2.08     | 13.3
      // 1024          | 2.05     | 14.7
      // no prefetch   | 2.1      | 28
      static constexpr int kPreloadAhead = 256;
      // 4 accumulator registers, one for each row being processed.
      // Each has 4 float32 lanes that corresponds to columns modulo 4, and
      // will need to be horizontally reduced at the end.
      float32x4_t acc0 = vdupq_n_f32(0);
      float32x4_t acc1 = acc0;
      float32x4_t acc2 = acc0;
      float32x4_t acc3 = acc0;
      int in = 0;
      // As much as possible, handle 4 columns of the left-hand side matrix
      // at a time. This allows for decent NEON implementation.
      for (; in <= lhs_params.cols - 4; in += 4) {
        float32x4_t input_val = vld1q_f32(rhs_data + in);
        const float* local_filter_ptr = filter_ptr;
        float32x4_t filter_val_0 = vld1q_f32(local_filter_ptr);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(float));
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_1 = vld1q_f32(local_filter_ptr);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(float));
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_2 = vld1q_f32(local_filter_ptr);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(float));
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_3 = vld1q_f32(local_filter_ptr);
        optimized_ops_preload_l1_stream(local_filter_ptr +
                                        kPreloadAhead / sizeof(float));
        filter_ptr += 4;
        acc0 = mul_add(acc0, filter_val_0, input_val);
        acc1 = mul_add(acc1, filter_val_1, input_val);
        acc2 = mul_add(acc2, filter_val_2, input_val);
        acc3 = mul_add(acc3, filter_val_3, input_val);
      }
      // Less than 4 values remain. Handle the remaining values
      // in one more copy of the above code handling 4, where we
      // walk back a few values to be able to load 4 values without
      // overrunning the buffer. This is where we make use of the requirement
      // (see IsSupportedGivenSufficientlyManyRows) that there are at least
      // 4 LHS columns.
      if (in < lhs_params.cols) {
        // `back` is how many entries to walk back by.
        // Its value is necessarily between 1 and 3.
        const int back = in + 4 - lhs_params.cols;
        TFLITE_DCHECK_GE(back, 1);
        TFLITE_DCHECK_LE(back, 3);
        // Load 4 values as usual.
        float32x4_t input_val = vld1q_f32(rhs_data + lhs_params.cols - 4);
        const float* local_filter_ptr = filter_ptr - back;
        filter_ptr += lhs_params.cols - in;
        float32x4_t filter_val_0 = vld1q_f32(local_filter_ptr);
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_1 = vld1q_f32(local_filter_ptr);
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_2 = vld1q_f32(local_filter_ptr);
        local_filter_ptr += lhs_params.cols;
        float32x4_t filter_val_3 = vld1q_f32(local_filter_ptr);
        // Now zero out the `back` first entries of input_val.
        // vsetq_lane_f32 takes a literal index, so we need unrolled code.
        // Each case intentionally falls through to zero all lanes below it.
        switch (back) {
          case 3:
            input_val = vsetq_lane_f32(0, input_val, 2);
            [[clang::fallthrough]];
          case 2:
            input_val = vsetq_lane_f32(0, input_val, 1);
            [[clang::fallthrough]];
          default:
            input_val = vsetq_lane_f32(0, input_val, 0);
        }
        // Multiply-accumulate 4 values as usual. The `back` first lanes
        // of filter_val_* are junk, but it doesn't matter since they get
        // multiplied by the zeros that we just wrote in the corresponding
        // lanes of input_val.
        acc0 = mul_add(acc0, filter_val_0, input_val);
        acc1 = mul_add(acc1, filter_val_1, input_val);
        acc2 = mul_add(acc2, filter_val_2, input_val);
        acc3 = mul_add(acc3, filter_val_3, input_val);
      }
      // Horizontally reduce accumulators
      float32x2_t pairwise_reduced_acc_0 =
          vpadd_f32(vget_low_f32(acc0), vget_high_f32(acc0));
      float32x2_t pairwise_reduced_acc_1 =
          vpadd_f32(vget_low_f32(acc1), vget_high_f32(acc1));
      float32x2_t pairwise_reduced_acc_2 =
          vpadd_f32(vget_low_f32(acc2), vget_high_f32(acc2));
      float32x2_t pairwise_reduced_acc_3 =
          vpadd_f32(vget_low_f32(acc3), vget_high_f32(acc3));
      float32x2_t reduced_lo =
          vpadd_f32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
      float32x2_t reduced_hi =
          vpadd_f32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
      float32x4_t reduced = vcombine_f32(reduced_lo, reduced_hi);
      // End of horizontal reduction: now `reduced` is a single float32x4
      // containing the 4 float32 accumulators corresponding to the 4 rows
      // being processed.
      if (params.bias) {
        // Add bias values.
        reduced = vaddq_f32(reduced, vld1q_f32(params.bias + row));
      }
      // Clamp and store to destination.
      reduced = vminq_f32(reduced, vdupq_n_f32(params.clamp_max));
      reduced = vmaxq_f32(reduced, vdupq_n_f32(params.clamp_min));
      vst1q_f32(dst_data + row, reduced);
    }
  }
};
#endif // TFLITE_WITH_RUY
#endif // USE_NEON
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_CUSTOM_GEMV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_custom_gemv.h | C++ | apache-2.0 | 36,518 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// Float GEMM implementation backed by Eigen. Only compiled in when
// TFLITE_WITH_RUY is not defined (see the #ifndef guarding this header).
struct GemmImplUsingEigen {
  // Computes dst = lhs * rhs with the float parameters in `params`.
  // Declaration only here; the definition presumably lives in the
  // corresponding .cc file (cpu_backend_gemm_eigen.cc) — confirm in build.
  // The context argument is unused by this backend.
  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
                  const MatrixParams<float>& rhs_params, const float* rhs_data,
                  const MatrixParams<float>& dst_params, float* dst_data,
                  const GemmParams<float, float>& params,
                  CpuBackendContext* /* context */);
};
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // not TFLITE_WITH_RUY
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_EIGEN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_eigen.h | C++ | apache-2.0 | 1,549 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_GEMMLOWP_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_GEMMLOWP_H_
#include <tuple>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#ifndef TFLITE_WITH_RUY
#include <cstdint>
#include <type_traits>
#include "public/gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// Maps a destination scalar type to the gemmlowp output stage that
// saturating-casts int32 accumulators down to that type. The primary
// template is intentionally empty so that unsupported DstScalar types
// fail to compile.
template <typename DstScalar>
struct GemmlowpSaturatingCastStage {};
template <>
struct GemmlowpSaturatingCastStage<std::uint8_t> {
  using Type = gemmlowp::OutputStageSaturatingCastToUint8;
};
template <>
struct GemmlowpSaturatingCastStage<std::int8_t> {
  using Type = gemmlowp::OutputStageSaturatingCastToInt8;
};
template <>
struct GemmlowpSaturatingCastStage<std::int16_t> {
  using Type = gemmlowp::OutputStageSaturatingCastToInt16;
};
// Maps the 8-bit source scalar type to the gemmlowp BitDepthParams variant
// to use (unsigned vs signed operands, LHS-nonzero assumption per the
// gemmlowp type names). Primary template intentionally empty.
template <typename DstScalar>
struct GemmlowpBitDepthParams {};
template <>
struct GemmlowpBitDepthParams<std::uint8_t> {
  using Type = gemmlowp::L8R8WithLhsNonzeroBitDepthParams;
};
template <>
struct GemmlowpBitDepthParams<std::int8_t> {
  using Type = gemmlowp::SignedL8R8WithLhsNonzeroBitDepthParams;
};
// gemmlowp-backed GEMM. Only the two quantized specializations below are
// defined; instantiating any other combination is a compile-time error
// (empty primary template).
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImplUsingGemmlowp {};
// Specialization for uniform quantization: a single fixed-point multiplier
// and exponent shared by every row of the destination.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar>
struct GemmImplUsingGemmlowp<
    LhsScalar, RhsScalar, AccumScalar, DstScalar,
    QuantizationFlavor::kIntegerWithUniformMultiplier> {
  static_assert(std::is_same<LhsScalar, RhsScalar>::value, "");
  static_assert(std::is_same<AccumScalar, std::int32_t>::value, "");
  using SrcScalar = LhsScalar;
  // Runs dst = lhs * rhs through gemmlowp with an output pipeline of
  // [optional bias-add] -> fixed-point rescale -> clamp -> saturating cast.
  static void Run(
      const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
      const MatrixParams<SrcScalar>& rhs_params, const SrcScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<std::int32_t, DstScalar,
                       QuantizationFlavor::kIntegerWithUniformMultiplier>&
          params,
      CpuBackendContext* context) {
    // The LHS is mapped row-major; RHS and destination are column-major.
    gemmlowp::MatrixMap<const SrcScalar, gemmlowp::MapOrder::RowMajor>
        gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
    gemmlowp::MatrixMap<const SrcScalar, gemmlowp::MapOrder::ColMajor>
        gemmlowp_rhs(rhs_data, rhs_params.rows, rhs_params.cols);
    gemmlowp::MatrixMap<DstScalar, gemmlowp::MapOrder::ColMajor> gemmlowp_dst(
        dst_data, dst_params.rows, dst_params.cols);
    using ColVectorMap =
        gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>;
    gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent scale_stage;
    scale_stage.result_offset_after_shift = dst_params.zero_point;
    scale_stage.result_fixedpoint_multiplier = params.multiplier_fixedpoint;
    scale_stage.result_exponent = params.multiplier_exponent;
    using SaturatingCastStageType =
        typename GemmlowpSaturatingCastStage<DstScalar>::Type;
    gemmlowp::OutputStageClamp clamp_stage;
    clamp_stage.min = params.clamp_min;
    clamp_stage.max = params.clamp_max;
    SaturatingCastStageType saturating_cast_stage;
    using BitDepthParams = typename GemmlowpBitDepthParams<SrcScalar>::Type;
    // The two branches differ only in whether the pipeline tuple carries a
    // bias-addition stage; the tuple types differ, hence the duplicated call.
    if (params.bias) {
      ColVectorMap bias_vector(params.bias, lhs_params.rows);
      gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
      bias_addition_stage.bias_vector = bias_vector;
      auto output_pipeline = std::make_tuple(
          bias_addition_stage, scale_stage, clamp_stage, saturating_cast_stage);
      gemmlowp::GemmWithOutputPipeline<SrcScalar, DstScalar, BitDepthParams>(
          context->gemmlowp_context(), gemmlowp_lhs, gemmlowp_rhs,
          &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
          output_pipeline);
    } else {
      auto output_pipeline =
          std::make_tuple(scale_stage, clamp_stage, saturating_cast_stage);
      gemmlowp::GemmWithOutputPipeline<SrcScalar, DstScalar, BitDepthParams>(
          context->gemmlowp_context(), gemmlowp_lhs, gemmlowp_rhs,
          &gemmlowp_dst, -lhs_params.zero_point, -rhs_params.zero_point,
          output_pipeline);
    }
  }
};
// Specialization for per-row ('per-channel') quantization: each destination
// row has its own fixed-point multiplier and exponent.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar>
struct GemmImplUsingGemmlowp<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                             QuantizationFlavor::kIntegerWithPerRowMultiplier> {
  static_assert(std::is_same<LhsScalar, RhsScalar>::value, "");
  static_assert(std::is_same<AccumScalar, std::int32_t>::value, "");
  using SrcScalar = LhsScalar;
  // Runs dst = lhs * rhs with per-row rescaling.
  // NOTE(review): params.bias is mapped into the pipeline unconditionally
  // below, so this path appears to require a non-null bias — confirm with
  // callers.
  static void Run(
      const MatrixParams<SrcScalar>& lhs_params, const SrcScalar* lhs_data,
      const MatrixParams<SrcScalar>& rhs_params, const SrcScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<std::int32_t, DstScalar,
                       QuantizationFlavor::kIntegerWithPerRowMultiplier>&
          params,
      CpuBackendContext* context) {
    // gemmlowp support for this per-channel path is limited to NEON.
    // We fall back to ruy outside of NEON.
#ifdef GEMMLOWP_NEON
    gemmlowp::MatrixMap<const SrcScalar, gemmlowp::MapOrder::RowMajor>
        gemmlowp_lhs(lhs_data, lhs_params.rows, lhs_params.cols);
    gemmlowp::MatrixMap<const SrcScalar, gemmlowp::MapOrder::ColMajor>
        gemmlowp_rhs(rhs_data, rhs_params.rows, rhs_params.cols);
    gemmlowp::MatrixMap<DstScalar, gemmlowp::MapOrder::ColMajor> gemmlowp_dst(
        dst_data, dst_params.rows, dst_params.cols);
    using ColVectorMap =
        gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>;
    ColVectorMap bias_vector(params.bias, lhs_params.rows);
    gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
    bias_addition_stage.bias_vector = bias_vector;
    // Per-channel ('PC') scale stage: one multiplier/exponent per dst row.
    gemmlowp::OutputStageScaleInt32ByFixedPointAndExponentPC<
        gemmlowp::VectorShape::Col>
        scale_stage;
    scale_stage.result_offset_after_shift = dst_params.zero_point;
    scale_stage.result_fixedpoint_multiplier =
        ColVectorMap(params.multiplier_fixedpoint_perchannel, dst_params.rows);
    scale_stage.result_exponent =
        ColVectorMap(params.multiplier_exponent_perchannel, dst_params.rows);
    using SaturatingCastStageType =
        typename GemmlowpSaturatingCastStage<DstScalar>::Type;
    gemmlowp::OutputStageClamp clamp_stage;
    clamp_stage.min = params.clamp_min;
    clamp_stage.max = params.clamp_max;
    SaturatingCastStageType saturating_cast_stage;
    auto output_pipeline = std::make_tuple(bias_addition_stage, scale_stage,
                                           clamp_stage, saturating_cast_stage);
    using BitDepthParams = typename GemmlowpBitDepthParams<SrcScalar>::Type;
    gemmlowp::GemmWithOutputPipeline<SrcScalar, DstScalar, BitDepthParams>(
        context->gemmlowp_context(), gemmlowp_lhs, gemmlowp_rhs, &gemmlowp_dst,
        -lhs_params.zero_point, -rhs_params.zero_point, output_pipeline);
#else
    GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                     QuantizationFlavor::kIntegerWithPerRowMultiplier>::
        Run(lhs_params, lhs_data, rhs_params, rhs_data, dst_params, dst_data,
            params, context);
#endif
  }
};
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // not TFLITE_WITH_RUY
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_GEMMLOWP_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_gemmlowp.h | C++ | apache-2.0 | 8,333 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_PARAMS_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_PARAMS_H_
#include <cstdint>
#include <limits>
#include <type_traits>
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace cpu_backend_gemm {
// Matrix storage order: column-major or row-major.
enum class Order { kColMajor, kRowMajor };
// Policy for caching packed copies of constant matrix data (see
// MatrixParams::cache_policy below).
enum class CachePolicy : std::uint8_t {
  kNeverCache,
  kCacheIfLargeSpeedup,
  kAlwaysCache,
};
// Picks the CachePolicy to use for a matrix based on whether its data is
// constant: constant (weights-like) data is eligible for caching when that
// is a large win, while mutable data must never be cached.
inline CachePolicy DefaultCachePolicy(bool is_constant_data) {
  if (is_constant_data) {
    return CachePolicy::kCacheIfLargeSpeedup;
  }
  return CachePolicy::kNeverCache;
}
// MatrixParams encapsulates the parameters that Gemm needs about each
// matrix, besides the buffer data pointer.
// Compare to ruy::Matrix, which also encapsulates the data pointer.
// Rationale for leaving the data pointer out of here: doing so
// requires complicated const-correctness mechanics. See
// ruy::ConstCheckingPtr.
template <typename Scalar>
struct MatrixParams {
  // Storage layout order. For now we only do plain linear non-strided
  // layout. It would be easy to support a stride if needed.
  Order order = Order::kColMajor;
  // Number of rows of the matrix.
  int rows = 0;
  // Number of columns of the matrix.
  int cols = 0;
  // The zero_point, i.e. which Scalar value is to be interpreted as zero.
  // When Scalar is floating-point, this must be 0.
  Scalar zero_point = 0;
  // When the data pointed to by this matrix is constant data, so that it is
  // valid to assume that equality of pointers implies equality of data,
  // a CachePolicy may be used instead of the default kNeverCache,
  // which will enable ruy to take advantage of this constancy of the data to
  // cache the packing work, which can be a large speedup in matrix*vector
  // and other narrow shapes. See DefaultCachePolicy() above for the usual
  // way this field is chosen.
  CachePolicy cache_policy = CachePolicy::kNeverCache;
};
// Enumeration of broad categories of Gemm.
//
// The primary reason for this to exist is to allow Gemm to compile
// only uniform-quantized or only per-channel-quantized code paths.
// This is unneeded with ruy as the back-end, as this is only a runtime
// difference in ruy, but with gemmlowp these really are separate code
// paths and templatizing in a QuantizationFlavor is necessary to avoid
// compiling unused gemmlowp code. Indeed, TFLite currently uses
// uint8 with uniform quantization and int8 with per-channel quantization,
// and does not use uint8 with per-channel. We want to avoid compiling
// the gemmlowp uint8 per-channel path when gemmlowp is the back-end.
//
// It's possible to drop this in the future if gemmlowp goes away and no
// other then-relevant backend library handles quantized paths in a way that
// requires knowing this at compile-time.
enum class QuantizationFlavor {
  // Floating-point Gemm: the accumulators are not multiplied by any
  // 'multiplier'.
  kFloatingPoint,
  // Quantized Gemm using a single multiplier for all accumulators.
  kIntegerWithUniformMultiplier,
  // Quantized Gemm using separate multipliers for accumulators of each
  // row of the destination matrix. This is what is called 'per-channel'
  // in GemmParams. Here we use the more specific 'per-row' terminology
  // to allow for the possibility of 'per-column' in the future, and to
  // allow for that to be a separate code path in some back-end such as
  // gemmlowp.
  kIntegerWithPerRowMultiplier
};
// Additional parameters that Gemm needs, beyond what falls into
// the MatrixParams that it takes. Compare to ruy::Spec.
//
// Decoupling AccumScalar from DstScalar (rather than deducing it from that)
// is useful future-proofing. Think of a float16 path using float32 accum.
//
// QuantizationFlavor is passed here even though it's technically not used
// in this class. This is so that we retain the ability in the future to
// specialize this class for quantization flavor, and this allows for
// Gemm to be templatized in quantization_flavor via the GemmParams that it
// takes, allowing for automatic template parameter deduction to take place,
// so that most call sites don't need to specify a QuantizationFlavor
// (only those that need perchannel quantization do).
template <typename AccumScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor =
              std::is_floating_point<AccumScalar>::value
                  ? QuantizationFlavor::kFloatingPoint
                  : QuantizationFlavor::kIntegerWithUniformMultiplier>
struct GemmParams {
  // Only for non-floating-point cases. The fixed-point part (i.e. the mantissa)
  // of the multiplier by which accumulators are multiplied before being cast
  // to the destination type.
  AccumScalar multiplier_fixedpoint = 0;
  // Only for non-floating-point cases. The exponent part of the aforementioned
  // multiplier.
  int multiplier_exponent = 0;
  // Per-channel variant of multiplier_fixedpoint. If not nullptr, this must
  // point to a buffer of as many values as there are rows in the destination
  // matrix. Each row of the destination matrix will use the corresponding
  // buffer element instead of multiplier_fixedpoint.
  const AccumScalar* multiplier_fixedpoint_perchannel = nullptr;
  // Per-channel variant of multiplier_exponent. If not nullptr, this must
  // point to a buffer of as many values as there are rows in the destination
  // matrix. Each row of the destination matrix will use the corresponding
  // buffer element instead of multiplier_exponent.
  //
  // Either none or both of multiplier_exponent_perchannel and
  // multiplier_fixedpoint_perchannel must be nullptr.
  const int* multiplier_exponent_perchannel = nullptr;
  // The bias vector data, if not null.
  const AccumScalar* bias = nullptr;
  // min clamp bound of destination values.
  // Defaults: -infinity for float destinations, full type range for integers.
  DstScalar clamp_min = std::is_floating_point<DstScalar>::value
                            ? -std::numeric_limits<DstScalar>::infinity()
                            : std::numeric_limits<DstScalar>::lowest();
  // max clamp bound of destination values.
  DstScalar clamp_max = std::is_floating_point<DstScalar>::value
                            ? std::numeric_limits<DstScalar>::infinity()
                            : std::numeric_limits<DstScalar>::max();
};
/* Convenience typedefs */
template <typename DstScalar>
using QuantizedGemmParams = GemmParams<std::int32_t, DstScalar>;
using FloatGemmParams = GemmParams<float, float>;
/* Validation functions */
// Note that this uses TFLITE_DCHECK from kernels/internal/compatibility.h
// and not TF_LITE_ASSERT from op_macros.h. We want this to be explicitly
// debug-build-only assertions so that there's not reason not to
// generously validate, and TF_LITE_ASSERT is actually at the moment
// a release-build assertion. See b/131587258.
// Validates self-consistency of GemmParams (debug-build-only checks).
template <typename AccumScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor>
void ValidateGemmParams(
    const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params) {
  // All of these are compile-time constants, so each template instantiation
  // reduces to exactly one of the four DCHECK groups below.
  constexpr bool kIsFloat =
      quantization_flavor == QuantizationFlavor::kFloatingPoint;
  constexpr bool kIsUniform =
      quantization_flavor == QuantizationFlavor::kIntegerWithUniformMultiplier;
  constexpr bool kIsPerRow =
      quantization_flavor == QuantizationFlavor::kIntegerWithPerRowMultiplier;
  constexpr bool kRawInt32Dst = std::is_same<DstScalar, int32_t>::value;
  if (kIsFloat) {
    // Floating-point Gemm: no quantized-multiplier field may be set.
    TFLITE_DCHECK(!params.multiplier_fixedpoint);
    TFLITE_DCHECK(!params.multiplier_exponent);
    TFLITE_DCHECK(!params.multiplier_fixedpoint_perchannel);
    TFLITE_DCHECK(!params.multiplier_exponent_perchannel);
  } else if (kIsUniform && !kRawInt32Dst) {
    // Uniform quantization: the single fixedpoint multiplier must be set and
    // the per-channel buffers must not be. multiplier_exponent may hold any
    // value, so there is nothing to check for it.
    TFLITE_DCHECK(params.multiplier_fixedpoint);
    TFLITE_DCHECK(!params.multiplier_fixedpoint_perchannel);
    TFLITE_DCHECK(!params.multiplier_exponent_perchannel);
  } else if (kIsPerRow && !kRawInt32Dst) {
    // Per-row quantization: only the per-channel buffers may be set.
    TFLITE_DCHECK(!params.multiplier_fixedpoint);
    TFLITE_DCHECK(!params.multiplier_exponent);
    TFLITE_DCHECK(params.multiplier_fixedpoint_perchannel);
    TFLITE_DCHECK(params.multiplier_exponent_perchannel);
  } else {
    // Raw int32 accumulators requested: make sure none of the
    // quantization params are set.
    TFLITE_DCHECK(!params.multiplier_fixedpoint);
    TFLITE_DCHECK(!params.multiplier_exponent);
    TFLITE_DCHECK(!params.multiplier_fixedpoint_perchannel);
    TFLITE_DCHECK(!params.multiplier_exponent_perchannel);
  }
}
namespace detail {
// Compile-time validation that the scalar types of a Gemm call are mutually
// consistent with the requested quantization flavor. Merely instantiating
// this struct fires the static_asserts.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct ValidateTypes {
  // This generic implementation is for quantized flavors.
  // kFloatingPoint will be a specialization below.
  static_assert(!std::is_floating_point<LhsScalar>::value, "");
  static_assert(!std::is_floating_point<RhsScalar>::value, "");
  static_assert(!std::is_floating_point<AccumScalar>::value, "");
  // No requirement on DstScalar --- we might in the future allow it
  // to be floating point even in a quantized Gemm.
};
// Floating-point flavor: every scalar type, including the destination,
// must be floating-point.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar>
struct ValidateTypes<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                     QuantizationFlavor::kFloatingPoint> {
  static_assert(std::is_floating_point<LhsScalar>::value, "");
  static_assert(std::is_floating_point<RhsScalar>::value, "");
  static_assert(std::is_floating_point<AccumScalar>::value, "");
  static_assert(std::is_floating_point<DstScalar>::value, "");
};
}  // namespace detail
// Validates overall consistency of all the parameters taken by a Gemm call:
// the 3 MatrixParams and the GemmParams. Type consistency is checked at
// compile time (detail::ValidateTypes); GemmParams self-consistency is
// checked with debug-only assertions (ValidateGemmParams).
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
void ValidateParams(
    const MatrixParams<LhsScalar>& lhs_params,
    const MatrixParams<RhsScalar>& rhs_params,
    const MatrixParams<DstScalar>& dst_params,
    const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params) {
  // Instantiating ValidateTypes triggers its static_asserts; the temporary
  // object itself is unused, hence the (void) cast.
  (void)detail::ValidateTypes<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                              quantization_flavor>();
  ValidateGemmParams(params);
}
// Test if the Gemm is degenerate in some way, e.g. nonsensical dimensions.
// Returns true only when all three matrices have positive dimensions and
// their shapes are consistent for Dst = Lhs * Rhs, i.e.
// Lhs is (dst_rows x depth) and Rhs is (depth x dst_cols).
template <typename LhsScalar, typename RhsScalar, typename DstScalar>
bool IsValidGemm(const MatrixParams<LhsScalar>& lhs_params,
                 const MatrixParams<RhsScalar>& rhs_params,
                 const MatrixParams<DstScalar>& dst_params) {
  bool valid = true;
  valid &= lhs_params.rows >= 1;
  valid &= lhs_params.cols >= 1;
  valid &= rhs_params.rows >= 1;
  valid &= rhs_params.cols >= 1;
  valid &= dst_params.rows >= 1;
  valid &= dst_params.cols >= 1;
  valid &= lhs_params.cols == rhs_params.rows;
  valid &= rhs_params.cols == dst_params.cols;
  // Bug fix: this used to compare lhs_params.rows against itself, which is
  // vacuously true and let mismatched destination row counts slip through.
  valid &= lhs_params.rows == dst_params.rows;
  return valid;
}
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_PARAMS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_params.h | C++ | apache-2.0 | 11,807 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_RUY_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_RUY_H_
#include "ruy/matrix.h" // from @ruy
#include "ruy/mul_params.h" // from @ruy
#include "ruy/ruy.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// Translates the TFLite CachePolicy enum into the equivalent ruy enum.
inline ruy::CachePolicy ToRuyCachePolicy(CachePolicy cache_policy) {
  if (cache_policy == CachePolicy::kNeverCache) {
    return ruy::CachePolicy::kNeverCache;
  }
  if (cache_policy == CachePolicy::kCacheIfLargeSpeedup) {
    return ruy::CachePolicy::kCacheIfLargeSpeedup;
  }
  if (cache_policy == CachePolicy::kAlwaysCache) {
    return ruy::CachePolicy::kAlwaysCache;
  }
  // Unknown enum value: crash in debug builds, fall back to no caching in
  // release builds.
  TFLITE_DCHECK(false);
  return ruy::CachePolicy::kNeverCache;
}
template <typename Scalar, typename DataPointer>
void MakeRuyMatrix(const MatrixParams<Scalar>& params, DataPointer data_ptr,
ruy::Matrix<Scalar>* dst, bool use_caching = false) {
ruy::Order ruy_order = params.order == Order::kColMajor
? ruy::Order::kColMajor
: ruy::Order::kRowMajor;
ruy::MakeSimpleLayout(params.rows, params.cols, ruy_order,
dst->mutable_layout());
// Note that ruy::Matrix::data is a ConstCheckingPtr, not a plain pointer.
// It does care whether we assign to it a Scalar* or a const Scalar*.
dst->set_data(data_ptr);
dst->set_zero_point(params.zero_point);
if (use_caching) {
dst->set_cache_policy(ToRuyCachePolicy(params.cache_policy));
}
}
// Floating-point case. This primary template only supports the
// kFloatingPoint flavor (enforced by the static_assert below); the integer
// cases are handled by the partial specializations that follow.
template <typename AccumScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor>
struct MakeRuyMulParamsImpl final {
  // Copies bias and clamp bounds into ruy's MulParams. No quantized
  // multiplier is involved in the floating-point flavor.
  static void Run(
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
      ruy::MulParams<AccumScalar, DstScalar>* ruy_mul_params) {
    static_assert(quantization_flavor == QuantizationFlavor::kFloatingPoint,
                  "");
    ruy_mul_params->set_bias(params.bias);
    ruy_mul_params->set_clamp_min(params.clamp_min);
    ruy_mul_params->set_clamp_max(params.clamp_max);
  }
};
// Integer-quantized case with destination type narrower than int32:
// the int32 accumulators are rescaled by the quantized multiplier(s),
// biased, and clamped before being stored in the narrower destination.
template <typename DstScalar, QuantizationFlavor quantization_flavor>
struct MakeRuyMulParamsImpl<std::int32_t, DstScalar, quantization_flavor>
    final {
  static void Run(
      const GemmParams<std::int32_t, DstScalar, quantization_flavor>& params,
      ruy::MulParams<std::int32_t, DstScalar>* ruy_mul_params) {
    static_assert(sizeof(DstScalar) < sizeof(std::int32_t), "");
    // quantization_flavor is a compile-time constant, so each instantiation
    // keeps exactly one of the two branches below.
    if (quantization_flavor ==
        QuantizationFlavor::kIntegerWithUniformMultiplier) {
      ruy_mul_params->set_multiplier_fixedpoint(params.multiplier_fixedpoint);
      ruy_mul_params->set_multiplier_exponent(params.multiplier_exponent);
    }
    if (quantization_flavor ==
        QuantizationFlavor::kIntegerWithPerRowMultiplier) {
      ruy_mul_params->set_multiplier_fixedpoint_perchannel(
          params.multiplier_fixedpoint_perchannel);
      ruy_mul_params->set_multiplier_exponent_perchannel(
          params.multiplier_exponent_perchannel);
    }
    ruy_mul_params->set_bias(params.bias);
    ruy_mul_params->set_clamp_min(params.clamp_min);
    ruy_mul_params->set_clamp_max(params.clamp_max);
  }
};
// Raw-integer case with destination type int32: the accumulators are
// returned as-is, so only the bias is applied -- no multiplier and no
// clamp bounds are set.
template <QuantizationFlavor quantization_flavor>
struct MakeRuyMulParamsImpl<std::int32_t, std::int32_t, quantization_flavor>
    final {
  static void Run(
      const GemmParams<std::int32_t, std::int32_t, quantization_flavor>& params,
      ruy::MulParams<std::int32_t, std::int32_t>* ruy_mul_params) {
    ruy_mul_params->set_bias(params.bias);
  }
};
// Translates a GemmParams into a ruy::MulParams by dispatching to the
// MakeRuyMulParamsImpl specialization matching (AccumScalar, DstScalar).
template <typename AccumScalar, typename DstScalar,
          QuantizationFlavor quantization_flavor>
void MakeRuyMulParams(
    const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
    ruy::MulParams<AccumScalar, DstScalar>* ruy_mul_params) {
  MakeRuyMulParamsImpl<AccumScalar, DstScalar, quantization_flavor>::Run(
      params, ruy_mul_params);
}
// Gemm implementation backed by ruy::Mul: builds ruy matrix views over the
// caller's buffers (no copies), translates GemmParams into ruy::MulParams,
// and runs the multiplication on the context's ruy context.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImplUsingRuy {
  static void Run(
      const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
      const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
      CpuBackendContext* context) {
    ruy::Matrix<LhsScalar> ruy_lhs;
    ruy::Matrix<RhsScalar> ruy_rhs;
    ruy::Matrix<DstScalar> ruy_dst;
    // Caching is only ever enabled for the input matrices; the destination
    // view is built with the default (no caching).
    MakeRuyMatrix(lhs_params, lhs_data, &ruy_lhs, context->use_caching());
    MakeRuyMatrix(rhs_params, rhs_data, &ruy_rhs, context->use_caching());
    MakeRuyMatrix(dst_params, dst_data, &ruy_dst);
    ruy::MulParams<AccumScalar, DstScalar> ruy_mul_params;
    MakeRuyMulParams(params, &ruy_mul_params);
    ruy::Mul(ruy_lhs, ruy_rhs, ruy_mul_params, context->ruy_context(),
             &ruy_dst);
  }
};
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_GEMM_RUY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_ruy.h | C++ | apache-2.0 | 6,088 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_X86_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_X86_H_
// If TFLITE_WITH_RUY is set, Ruy is the only GEMM option. In this header
// we select either Ruy or an alternative based on the SIMD extentions
// available on the given x86 platform.
#ifndef TFLITE_WITH_RUY
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_eigen.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_ruy.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace cpu_backend_gemm {
namespace detail {
// GEMM dispatcher for x86 builds without TFLITE_WITH_RUY: selects between
// the deprecated gemmlowp path and Ruy based on a context setting.
template <typename LhsScalar, typename RhsScalar, typename AccumScalar,
          typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImplX86 {
  static void Run(
      const MatrixParams<LhsScalar>& lhs_params, const LhsScalar* lhs_data,
      const MatrixParams<RhsScalar>& rhs_params, const RhsScalar* rhs_data,
      const MatrixParams<DstScalar>& dst_params, DstScalar* dst_data,
      const GemmParams<AccumScalar, DstScalar, quantization_flavor>& params,
      CpuBackendContext* context) {
    // TODO(b/168923364) Ruy is preferred on x86, but check if the deprecated
    // path is enabled.
    if (context->PreferGemmlowpOnX86()) {
      // Dispatch to gemmlowp.
      detail::GemmImplUsingGemmlowp<
          LhsScalar, RhsScalar, AccumScalar, DstScalar,
          quantization_flavor>::Run(lhs_params, lhs_data, rhs_params, rhs_data,
                                    dst_params, dst_data, params, context);
      return;
    }
    // Run-time dispatch to Ruy for platforms with AVX or above.
    detail::GemmImplUsingRuy<LhsScalar, RhsScalar, AccumScalar, DstScalar,
                             quantization_flavor>::Run(lhs_params, lhs_data,
                                                       rhs_params, rhs_data,
                                                       dst_params, dst_data,
                                                       params, context);
  }
};
// For float, defer to eigen for now. This full specialization bypasses the
// gemmlowp/Ruy dispatch above entirely.
template <>
struct GemmImplX86<float, float, float, float,
                   QuantizationFlavor::kFloatingPoint> {
  static void Run(const MatrixParams<float>& lhs_params, const float* lhs_data,
                  const MatrixParams<float>& rhs_params, const float* rhs_data,
                  const MatrixParams<float>& dst_params, float* dst_data,
                  const GemmParams<float, float,
                                   QuantizationFlavor::kFloatingPoint>& params,
                  CpuBackendContext* context) {
    GemmImplUsingEigen::Run(lhs_params, lhs_data, rhs_params, rhs_data,
                            dst_params, dst_data, params, context);
  }
};
// gemmlowp requires NEON for certain quantization cases. See note in
// cpu_backend_gemm.h
#if !defined(GEMMLOWP_NEON)
// Without GEMMLOWP_NEON, force the Ruy path (bypassing the primary-template
// dispatch above) for those cases: 8-bit inputs with an int8 destination...
template <typename SrcScalar, QuantizationFlavor quantization_flavor>
struct GemmImplX86<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
                   quantization_flavor>
    : detail::GemmImplUsingRuy<SrcScalar, SrcScalar, std::int32_t, std::int8_t,
                               quantization_flavor> {};
// ...int8 inputs with any destination type...
template <typename DstScalar, QuantizationFlavor quantization_flavor>
struct GemmImplX86<std::int8_t, std::int8_t, std::int32_t, DstScalar,
                   quantization_flavor>
    : detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
                               DstScalar, quantization_flavor> {};
// ...and the int8/int8 case matched by both partial specializations above;
// this more-specialized one resolves the would-be ambiguity.
template <QuantizationFlavor quantization_flavor>
struct GemmImplX86<std::int8_t, std::int8_t, std::int32_t, std::int8_t,
                   quantization_flavor>
    : detail::GemmImplUsingRuy<std::int8_t, std::int8_t, std::int32_t,
                               std::int8_t, quantization_flavor> {};
#endif  // not GEMMLOWP_NEON
} // namespace detail
} // namespace cpu_backend_gemm
} // namespace tflite
#endif // not TFLITE_WITH_RUY
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_X86_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_gemm_x86.h | C++ | apache-2.0 | 4,789 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#define TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#ifdef TFLITE_WITH_RUY
#include "ruy/context.h" // from @ruy
#include "ruy/thread_pool.h" // from @ruy
#else
#include "public/gemmlowp.h"
#endif
namespace tflite {
namespace cpu_backend_threadpool {
#ifdef TFLITE_WITH_RUY
// Built with Ruy: tasks derive from ruy::Task and run on Ruy's thread pool.
using Task = ruy::Task;
// Runs `tasks_count` tasks (an array of TaskType, a subclass of Task) on the
// context's thread pool. tasks_count must not exceed the context's maximum
// thread count (debug-checked below).
// NOTE(review): assumed to block until all tasks have completed -- confirm
// against the ruy::ThreadPool documentation.
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  cpu_backend_context->ruy_context()->mutable_thread_pool()->Execute(
      tasks_count, tasks);
}
#else  // not TFLITE_WITH_RUY
// Built without Ruy: tasks derive from gemmlowp::Task and run on gemmlowp's
// worker pool. Same contract as the Ruy variant above.
using Task = gemmlowp::Task;
template <typename TaskType>
void Execute(int tasks_count, TaskType* tasks,
             CpuBackendContext* cpu_backend_context) {
  TFLITE_DCHECK_LE(tasks_count, cpu_backend_context->max_num_threads());
  cpu_backend_context->gemmlowp_context()->workers_pool()->Execute(tasks_count,
                                                                   tasks);
}
#endif
} // namespace cpu_backend_threadpool
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CPU_BACKEND_THREADPOOL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/cpu_backend_threadpool.h | C++ | apache-2.0 | 2,016 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_CUSTOM_OPS_REGISTER_H_
#define TENSORFLOW_LITE_KERNELS_CUSTOM_OPS_REGISTER_H_
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace ops {
namespace custom {
TfLiteRegistration* Register_ATAN2();
TfLiteRegistration* Register_HASHTABLE();
TfLiteRegistration* Register_HASHTABLE_FIND();
TfLiteRegistration* Register_HASHTABLE_IMPORT();
TfLiteRegistration* Register_HASHTABLE_SIZE();
TfLiteRegistration* Register_IRFFT2D();
TfLiteRegistration* Register_MULTINOMIAL();
TfLiteRegistration* Register_RANDOM_STANDARD_NORMAL();
TfLiteRegistration* Register_RANDOM_UNIFORM();
TfLiteRegistration* Register_RANDOM_UNIFORM_INT();
TfLiteRegistration* Register_SIGN();
} // namespace custom
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_CUSTOM_OPS_REGISTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/custom_ops_register.h | C++ | apache-2.0 | 1,497 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_DEQUANTIZE_H_
#include <stdint.h>
#include "third_party/eigen3/Eigen/Core"
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace ops {
namespace builtin {
namespace dequantize {
// This file has two implementations of Dequantize.
enum KernelType {
  kReference,         // Portable reference implementation.
  kGenericOptimized,  // Implementation from optimized_ops.
};
// Dequantizes `input` into float `output`, using the zero_point and scale
// carried in input->params. Supported input types: uint8, int8, int16
// (quantized) and float16. kernel_type selects between the reference and
// the optimized implementation for the quantized types.
// Returns kTfLiteError for any other input type.
template <KernelType kernel_type>
TfLiteStatus DequantizeImpl(TfLiteContext* context, TfLiteNode* node,
                            const TfLiteTensor* input, TfLiteTensor* output) {
  DequantizationParams op_params;
  op_params.zero_point = input->params.zero_point;
  op_params.scale = input->params.scale;
  switch (input->type) {
    case kTfLiteUInt8:
      if (kernel_type == kReference) {
        reference_ops::Dequantize(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::Dequantize(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      break;
    case kTfLiteInt8:
      if (kernel_type == kReference) {
        reference_integer_ops::Dequantize<int8_t>(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::Dequantize(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      break;
    case kTfLiteInt16:
      if (kernel_type == kReference) {
        reference_integer_ops::Dequantize<int16_t>(
            op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      } else {
        optimized_ops::Dequantize(
            op_params, GetTensorShape(input), GetTensorData<int16_t>(input),
            GetTensorShape(output), GetTensorData<float>(output));
      }
      break;
    case kTfLiteFloat16: {
      // Plain half->float conversion: op_params (zero_point/scale) is not
      // used on this path, and there is no optimized variant.
      const Eigen::half* half_data = reinterpret_cast<const Eigen::half*>(
          GetTensorData<TfLiteFloat16>(input));
      reference_ops::Dequantize(GetTensorShape(input), half_data,
                                GetTensorShape(output),
                                GetTensorData<float>(output));
      break;
    }
    default:
      context->ReportError(context, "Type %d not supported.", input->type);
      return kTfLiteError;
  }
  return kTfLiteOk;
}
} // namespace dequantize
} // namespace builtin
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_DEQUANTIZE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/dequantize.h | C++ | apache-2.0 | 3,875 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#define TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
#include "tensorflow/lite/c/common.h"
namespace EigenForTFLite {
struct ThreadPoolDevice;
}
namespace tflite {
namespace eigen_support {
// Let the framework know that the op will be using Eigen. If necessary a set of
// temporary Eigen objects might be created and placed in 'context'.
void IncrementUsageCounter(TfLiteContext* context);
// Let the framework know that the op stopped using Eigen. If there are no more
// usages all temporary Eigen objects will be deleted.
void DecrementUsageCounter(TfLiteContext* context);
// Fetch the ThreadPoolDevice associated with the provided context.
//
// Note: The caller must ensure that |IncrementUsageCounter()| has already been
// called. Moreover, it is *not* safe to cache the returned device; it may be
// invalidated if the context thread count changes.
const EigenForTFLite::ThreadPoolDevice* GetThreadPoolDevice(
TfLiteContext* context);
} // namespace eigen_support
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_EIGEN_SUPPORT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/eigen_support.h | C++ | apache-2.0 | 1,777 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Forward declares registrations for specific FC layer implementations. Do not
// include this header if you are fine with any FC implementation, include
// builtin_op_kernels.h instead. This implementation-specific registration is
// only available for FC, as these versions are explicitly tested and supported.
#ifndef TENSORFLOW_LITE_KERNELS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_FULLY_CONNECTED_H_
#include "tensorflow/lite/c/common.h"
namespace tflite {
namespace ops {
namespace builtin {
TfLiteRegistration* Register_FULLY_CONNECTED_REF();
TfLiteRegistration* Register_FULLY_CONNECTED_GENERIC_OPT();
TfLiteRegistration* Register_FULLY_CONNECTED_PIE();
TfLiteRegistration* Register_FULLY_CONNECTED_SPARSE_REF();
TfLiteRegistration* Register_FULLY_CONNECTED_SPARSE_OPT();
} // namespace builtin
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_FULLY_CONNECTED_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/fully_connected.h | C++ | apache-2.0 | 1,586 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_GRU_CELL_H_
#define TENSORFLOW_LITE_KERNELS_GRU_CELL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
namespace tflite {
namespace ops {
namespace custom {
namespace gru_cell {
void GruCell(const RuntimeShape& input_shape, const float* input,
const RuntimeShape& state_shape, const float* input_state,
const RuntimeShape& gate_weight_shape, const float* gate_weight,
const RuntimeShape& gate_bias_shape, const float* gate_bias,
const RuntimeShape& candidate_weight_shape,
const float* candidate_weight,
const RuntimeShape& candidate_bias_shape,
const float* candidate_bias, const RuntimeShape& output_shape,
float* output, float* output_state,
const RuntimeShape& activation_shape, float* activation,
const RuntimeShape& concat_shape, float* concat,
const tflite::FullyConnectedParams& fc_params,
tflite::CpuBackendContext* cpu_backend_context);
} // namespace gru_cell
} // namespace custom
} // namespace ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_GRU_CELL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/gru_cell.h | C++ | apache-2.0 | 1,911 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
#ifndef ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#ifdef GEMMLOWP_ALLOW_SLOW_SCALAR_FALLBACK
#define ALLOW_SLOW_GENERIC_DEPTHWISECONV_FALLBACK
#endif
#endif
#include <functional>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
constexpr int kReverseShift = -1;
// Writes the clamping bounds implied by the fused activation `ac` into
// *output_activation_min / *output_activation_max.
inline void GetActivationMinMax(FusedActivationFunctionType ac,
                                float* output_activation_min,
                                float* output_activation_max) {
  // Start from the "no activation" bounds (the whole float range) and
  // tighten them per activation kind.
  float min_bound = std::numeric_limits<float>::lowest();
  float max_bound = std::numeric_limits<float>::max();
  switch (ac) {
    case FusedActivationFunctionType::kNone:
      break;
    case FusedActivationFunctionType::kRelu:
      min_bound = 0.f;
      break;
    case FusedActivationFunctionType::kRelu1:
      min_bound = -1.f;
      max_bound = 1.f;
      break;
    case FusedActivationFunctionType::kRelu6:
      min_bound = 0.f;
      max_bound = 6.f;
      break;
  }
  *output_activation_min = min_bound;
  *output_activation_max = max_bound;
}
// Clamps x to the closed interval
// [output_activation_min, output_activation_max].
template <typename T>
inline T ActivationFunctionWithMinMax(T x, T output_activation_min,
                                      T output_activation_max) {
  const T lower_clamped = std::max(x, output_activation_min);
  return std::min(lower_clamped, output_activation_max);
}
// Legacy function, left for compatibility only.
// Same as ActivationFunctionWithMinMax, except the activation kind is a
// compile-time template parameter from which the bounds are derived.
template <FusedActivationFunctionType Ac>
float ActivationFunction(float x) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  return ActivationFunctionWithMinMax(x, output_activation_min,
                                      output_activation_max);
}
// In-place bias-add + clamp: treats array_data (array_size floats, which
// must be a multiple of bias_size) as consecutive chunks of bias_size
// elements; adds bias_data element-wise to each chunk and clamps every
// result to [clamp_min, clamp_max].
inline void BiasAndClamp(float clamp_min, float clamp_max, int bias_size,
                         const float* bias_data, int array_size,
                         float* array_data) {
  // Note: see b/132215220: in May 2019 we thought it would be OK to replace
  // this with the Eigen one-liner:
  //   return (array.colwise() + bias).cwiseMin(clamp_max).cwiseMin(clamp_max).
  // This turned out to severely regress performance: +4ms (i.e. 8%) on
  // MobileNet v2 / 1.0 / 224. So we keep custom NEON code for now.
  TFLITE_DCHECK_EQ((array_size % bias_size), 0);
#ifdef USE_NEON
  float* array_ptr = array_data;
  float* array_end_ptr = array_ptr + array_size;
  const auto clamp_min_vec = vdupq_n_f32(clamp_min);
  const auto clamp_max_vec = vdupq_n_f32(clamp_max);
  // Outer loop: one iteration per bias_size-sized chunk.
  for (; array_ptr != array_end_ptr; array_ptr += bias_size) {
    int i = 0;
    // Main vector loop: 16 floats (4 NEON registers) per iteration.
    for (; i <= bias_size - 16; i += 16) {
      auto b0 = vld1q_f32(bias_data + i);
      auto b1 = vld1q_f32(bias_data + i + 4);
      auto b2 = vld1q_f32(bias_data + i + 8);
      auto b3 = vld1q_f32(bias_data + i + 12);
      auto a0 = vld1q_f32(array_ptr + i);
      auto a1 = vld1q_f32(array_ptr + i + 4);
      auto a2 = vld1q_f32(array_ptr + i + 8);
      auto a3 = vld1q_f32(array_ptr + i + 12);
      auto x0 = vaddq_f32(a0, b0);
      auto x1 = vaddq_f32(a1, b1);
      auto x2 = vaddq_f32(a2, b2);
      auto x3 = vaddq_f32(a3, b3);
      x0 = vmaxq_f32(clamp_min_vec, x0);
      x1 = vmaxq_f32(clamp_min_vec, x1);
      x2 = vmaxq_f32(clamp_min_vec, x2);
      x3 = vmaxq_f32(clamp_min_vec, x3);
      x0 = vminq_f32(clamp_max_vec, x0);
      x1 = vminq_f32(clamp_max_vec, x1);
      x2 = vminq_f32(clamp_max_vec, x2);
      x3 = vminq_f32(clamp_max_vec, x3);
      vst1q_f32(array_ptr + i, x0);
      vst1q_f32(array_ptr + i + 4, x1);
      vst1q_f32(array_ptr + i + 8, x2);
      vst1q_f32(array_ptr + i + 12, x3);
    }
    // Tail: 4 floats per iteration.
    for (; i <= bias_size - 4; i += 4) {
      auto b = vld1q_f32(bias_data + i);
      auto a = vld1q_f32(array_ptr + i);
      auto x = vaddq_f32(a, b);
      x = vmaxq_f32(clamp_min_vec, x);
      x = vminq_f32(clamp_max_vec, x);
      vst1q_f32(array_ptr + i, x);
    }
    // Scalar remainder (fewer than 4 elements).
    for (; i < bias_size; i++) {
      array_ptr[i] = ActivationFunctionWithMinMax(array_ptr[i] + bias_data[i],
                                                  clamp_min, clamp_max);
    }
  }
#else  // not NEON
  // Portable scalar fallback with identical semantics.
  for (int array_offset = 0; array_offset < array_size;
       array_offset += bias_size) {
    for (int i = 0; i < bias_size; i++) {
      array_data[array_offset + i] = ActivationFunctionWithMinMax(
          array_data[array_offset + i] + bias_data[i], clamp_min, clamp_max);
    }
  }
#endif
}
// Multiplies x by a Q31 quantized multiplier, then applies a rounding right
// shift of -left_shift (left_shift is expected to be non-positive here, hence
// the "SmallerThanOne" in the name).
inline int32_t MultiplyByQuantizedMultiplierSmallerThanOneExp(
    int32_t x, int32_t quantized_multiplier, int left_shift) {
  const int32_t scaled =
      gemmlowp::SaturatingRoundingDoublingHighMul(x, quantized_multiplier);
  return gemmlowp::RoundingDivideByPOT(scaled, -left_shift);
}
// Multiplies x by a quantized multiplier whose effective value exceeds one:
// x is first scaled up by 2^left_shift, then high-multiplied by the Q31
// multiplier with saturating rounding doubling semantics.
inline int32_t MultiplyByQuantizedMultiplierGreaterThanOne(
    int32_t x, int32_t quantized_multiplier, int left_shift) {
  const int32_t shifted_x = x * (1 << left_shift);
  return gemmlowp::SaturatingRoundingDoublingHighMul(shifted_x,
                                                     quantized_multiplier);
}
// Multiplies x by a Q31 quantized multiplier with a combined shift: a
// positive `shift` is applied as a left shift before the fixed-point
// multiply, a negative one as a rounding right shift after it.
inline int32_t MultiplyByQuantizedMultiplier(int32_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  const int left_shift = (shift > 0) ? shift : 0;
  const int right_shift = (shift > 0) ? 0 : -shift;
  const int32_t scaled = gemmlowp::SaturatingRoundingDoublingHighMul(
      x * (1 << left_shift), quantized_multiplier);
  return gemmlowp::RoundingDivideByPOT(scaled, right_shift);
}
// 64-bit-input variant of MultiplyByQuantizedMultiplier.
// Inputs:
//   - quantized_multiplier has its fixed point at bit 31 (Q31, non-negative)
//   - shift is -31 to +7 (negative means right shift)
//   - x must lie in [-(1<<47), 1<<47)
// The Q31 multiplier is first collapsed to Q15 with round-to-nearest
// (saturating the rounding at 0x7FFF), then a single 64-bit
// multiply-add-shift produces the rounded result. Scaling is assumed to be
// chosen so the final value fits in int32_t.
inline int32_t MultiplyByQuantizedMultiplier(int64_t x,
                                             int32_t quantized_multiplier,
                                             int shift) {
  assert(quantized_multiplier >= 0);
  assert(shift >= -31 && shift < 8);
  assert(x >= -(static_cast<int64_t>(1) << 47) &&
         x < (static_cast<int64_t>(1) << 47));
  // Round the Q31 multiplier to Q15; values at the very top of the range
  // would overflow the rounding add, so they saturate to 0x7FFF.
  const bool near_max = quantized_multiplier >= 0x7FFF0000;
  const int32_t reduced_multiplier =
      near_max ? 0x7FFF : ((quantized_multiplier + (1 << 15)) >> 16);
  const int total_shift = 15 - shift;
  const int64_t rounding = static_cast<int64_t>(1) << (total_shift - 1);
  const int64_t product =
      x * static_cast<int64_t>(reduced_multiplier) + rounding;
  return static_cast<int32_t>(product >> total_shift);
}
#ifdef USE_NEON
// Round uses ARM's rounding shift right.
// Vectorized MultiplyByQuantizedMultiplier over four int32x4 lanes: per lane,
// the input is left-shifted by max(shift, 0), high-multiplied by the Q31
// multiplier with saturating rounding doubling semantics (vqrdmulhq_s32),
// then rounding-shifted right by -min(shift, 0) (vrshlq_s32 with a
// non-positive shift count).
inline int32x4x4_t MultiplyByQuantizedMultiplier4Rows(
    int32x4x4_t input_val, int32_t quantized_multiplier, int shift) {
  const int left_shift = std::max(shift, 0);
  const int right_shift = std::min(shift, 0);
  int32x4x4_t result;
  // Broadcast the scalar multiplier and shift counts across all lanes.
  int32x4_t multiplier_dup = vdupq_n_s32(quantized_multiplier);
  int32x4_t left_shift_dup = vdupq_n_s32(left_shift);
  int32x4_t right_shift_dup = vdupq_n_s32(right_shift);
  result.val[0] =
      vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[0], left_shift_dup),
                               multiplier_dup),
                 right_shift_dup);
  result.val[1] =
      vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[1], left_shift_dup),
                               multiplier_dup),
                 right_shift_dup);
  result.val[2] =
      vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[2], left_shift_dup),
                               multiplier_dup),
                 right_shift_dup);
  result.val[3] =
      vrshlq_s32(vqrdmulhq_s32(vshlq_s32(input_val.val[3], left_shift_dup),
                               multiplier_dup),
                 right_shift_dup);
  return result;
}
#endif
// Returns the number of leading zero bits in integer_input, counted in the
// width of T (std::numeric_limits<T>::digits). Returns digits for input 0.
//
// Fix: the previous GNUC branch passed any T straight to __builtin_clz, which
// (a) truncates types wider than unsigned int (e.g. uint64_t) and (b) counts
// within 32 bits for narrower types, disagreeing with the portable #else
// branch (which counts within T's own width). The builtin path now dispatches
// on width and normalizes the count to T's digits, matching the portable
// semantics. Behavior for uint32_t inputs — the in-file callers — is
// unchanged.
template <typename T>
int CountLeadingZeros(T integer_input) {
  static_assert(std::is_unsigned<T>::value,
                "Only unsigned integer types handled.");
  if (integer_input == 0) {
    return std::numeric_limits<T>::digits;
  }
#if defined(__GNUC__)
  if (sizeof(T) <= sizeof(unsigned int)) {
    // __builtin_clz counts within unsigned int; subtract the width surplus.
    return __builtin_clz(static_cast<unsigned int>(integer_input)) -
           (std::numeric_limits<unsigned int>::digits -
            std::numeric_limits<T>::digits);
  } else {
    return __builtin_clzll(static_cast<unsigned long long>(integer_input)) -
           (std::numeric_limits<unsigned long long>::digits -
            std::numeric_limits<T>::digits);
  }
#else
  // Portable fallback: shift left until the top bit of T is reached.
  const T one_in_leading_positive = static_cast<T>(1)
                                    << (std::numeric_limits<T>::digits - 1);
  int leading_zeros = 0;
  while (integer_input < one_in_leading_positive) {
    integer_input <<= 1;
    ++leading_zeros;
  }
  return leading_zeros;
#endif
}
// Returns the number of redundant sign bits of integer_input (bits equal to
// the sign bit, below it). For 0 this is the full digit count of T.
template <typename T>
inline int CountLeadingSignBits(T integer_input) {
  static_assert(std::is_signed<T>::value, "Only signed integer types handled.");
#if defined(__GNUC__) && !defined(__clang__)
  return integer_input ? __builtin_clrsb(integer_input)
                       : std::numeric_limits<T>::digits;
#else
  using U = typename std::make_unsigned<T>::type;
  if (integer_input >= 0) {
    // Drop one for the sign bit itself.
    return CountLeadingZeros(static_cast<U>(integer_input)) - 1;
  }
  if (integer_input == std::numeric_limits<T>::min()) {
    // -integer_input would overflow; the minimum has no redundant sign bits.
    return 0;
  }
  return CountLeadingZeros(2 * static_cast<U>(-integer_input) - 1);
#endif
}
// Use "count leading zeros" helper functions to do a fast Floor(log_2(x)).
// Only 32- and 64-bit signed integral types are supported; n must be > 0.
template <typename Integer>
inline Integer FloorLog2(Integer n) {
  static_assert(std::is_integral<Integer>::value, "");
  static_assert(std::is_signed<Integer>::value, "");
  static_assert(sizeof(Integer) == 4 || sizeof(Integer) == 8, "");
  TFLITE_CHECK_GT(n, 0);
  // Index of the most significant set bit: (value bits - 1) minus the count
  // of redundant sign bits. Value bits are 31 for int32, 63 for int64.
  const Integer value_bits = (sizeof(Integer) == 4) ? 31 : 63;
  return (value_bits - 1) - CountLeadingSignBits(n);
}
// Generates an INT16 LUT for `func` (e.g. exp(x) or 1/(1+x) used in softmax)
// over the domain [min, max], double-precision variant.
//   func  - the function to tabulate
//   min,max - table limits
//   table - pointer to the output buffer
//   num   - number of elements in the LUT
// The buffer should hold num + 1 entries; the extra final entry is used only
// for slope calculation by the lookup code.
inline void gen_lut(double (*func)(double), double min, double max,
                    int16_t* table, const int num) {
  const double step = (max - min) / (num - 1);
  const double half_step = step / 2.0;
  for (int i = 0; i < num - 1; i++) {
    // Rounded Q15 sample at the left edge of this interval.
    const double sample_val = TfLiteRound(func(min + i * step) * 32768.0);
    // Midpoint value predicted by linear interpolation between the (raw)
    // right-edge value and the rounded left-edge value...
    const double midpoint_interp_val = TfLiteRound(
        (func(min + (i + 1) * step) * 32768.0 + sample_val) / 2.0);
    // ...versus the true midpoint value of func.
    const double midpoint_val =
        TfLiteRound(func(min + i * step + half_step) * 32768.0);
    // Bias the stored sample by half the midpoint error to spread the
    // interpolation error across the interval.
    const double midpoint_err = midpoint_interp_val - midpoint_val;
    const double bias = TfLiteRound(midpoint_err / 2.0);
    table[i] = std::min<double>(std::max<double>(sample_val - bias, -32768.0),
                                32767.0);
  }
  // Last entry: plain clamped sample at max.
  table[num - 1] = std::min<double>(
      std::max<double>(TfLiteRound(func(max) * 32768.0), -32768.0), 32767.0);
}
// Generates an INT16 LUT for `func` (e.g. exp(x) or 1/(1+x) used in softmax)
// over the domain [min, max], single-precision variant.
//   func  - the function to tabulate
//   min,max - table limits
//   table - pointer to the output buffer
//   num   - number of elements in the LUT
// The buffer should hold num + 1 entries; the extra final entry is used only
// for slope calculation by the lookup code.
inline void gen_lut(float (*func)(float), float min, float max, int16_t* table,
                    const int num) {
  const float step = (max - min) / (num - 1);
  const float half_step = step / 2.0f;
  for (int i = 0; i < num - 1; i++) {
    // Rounded Q15 sample at the left edge of this interval.
    const float sample_val = TfLiteRound(func(min + i * step) * 32768.0f);
    // Midpoint value predicted by linear interpolation between the (raw)
    // right-edge value and the rounded left-edge value...
    const float midpoint_interp_val = TfLiteRound(
        (func(min + (i + 1) * step) * 32768.0f + sample_val) / 2.0f);
    // ...versus the true midpoint value of func.
    const float midpoint_val =
        TfLiteRound(func(min + i * step + half_step) * 32768.0f);
    // Bias the stored sample by half the midpoint error to spread the
    // interpolation error across the interval.
    const float midpoint_err = midpoint_interp_val - midpoint_val;
    const float bias = TfLiteRound(midpoint_err / 2.0f);
    table[i] = std::min<float>(std::max<float>(sample_val - bias, -32768.0f),
                               32767.0f);
  }
  // Last entry: plain clamped sample at max.
  table[num - 1] = std::min<float>(
      std::max<float>(TfLiteRound(func(max) * 32768.0f), -32768.0f), 32767.0f);
}
// int16_t table lookup with linear interpolation, e.g. for exp() and 1/(1+x)
// tables used in softmax. The table has 512 base entries (lut[index + 1] is
// read, so the buffer extends one entry past the last base slot).
inline int16_t generic_int16_table_lookup(int16_t value, const int16_t* lut) {
  // Top bits of `value` pick the table cell, offset by 256 so negative
  // inputs land in the lower half.
  const uint16_t index = static_cast<uint16_t>(256 + (value >> 7));
  assert(index < 512 && "LUT index out of range.");
  // Low 7 bits give the position between the two bracketing entries.
  const int16_t frac = value & 0x7f;
  // Base and slope are Q0.15.
  const int16_t base = lut[index];
  const int16_t slope = lut[index + 1] - lut[index];
  // Q0.15 * Q0.7 = Q0.22; add half an LSB and shift back to Q0.15.
  const int32_t delta = (static_cast<int32_t>(slope) * frac + 64) >> 7;
  return base + delta;
}
// Table of sigmoid(i/24) at 0.16 format - 256 elements.
// We use combined sigmoid and tanh look-up table, since
// tanh(x) = 2*sigmoid(2*x) -1.
// Both functions are symmetric, so the LUT table is only needed
// for the absolute value of the input.
// The first entry is 32768 = 0.5 in 0.16 format (sigmoid(0)), and the values
// increase monotonically, saturating toward 65535 (~1.0).
static const uint16_t sigmoid_table_uint16[256] = {
    32768, 33451, 34133, 34813, 35493, 36169, 36843, 37513, 38180, 38841, 39498,
    40149, 40794, 41432, 42064, 42688, 43304, 43912, 44511, 45102, 45683, 46255,
    46817, 47369, 47911, 48443, 48964, 49475, 49975, 50464, 50942, 51409, 51865,
    52311, 52745, 53169, 53581, 53983, 54374, 54755, 55125, 55485, 55834, 56174,
    56503, 56823, 57133, 57433, 57724, 58007, 58280, 58544, 58800, 59048, 59288,
    59519, 59743, 59959, 60168, 60370, 60565, 60753, 60935, 61110, 61279, 61441,
    61599, 61750, 61896, 62036, 62172, 62302, 62428, 62549, 62666, 62778, 62886,
    62990, 63090, 63186, 63279, 63368, 63454, 63536, 63615, 63691, 63765, 63835,
    63903, 63968, 64030, 64090, 64148, 64204, 64257, 64308, 64357, 64405, 64450,
    64494, 64536, 64576, 64614, 64652, 64687, 64721, 64754, 64786, 64816, 64845,
    64873, 64900, 64926, 64950, 64974, 64997, 65019, 65039, 65060, 65079, 65097,
    65115, 65132, 65149, 65164, 65179, 65194, 65208, 65221, 65234, 65246, 65258,
    65269, 65280, 65291, 65301, 65310, 65319, 65328, 65337, 65345, 65352, 65360,
    65367, 65374, 65381, 65387, 65393, 65399, 65404, 65410, 65415, 65420, 65425,
    65429, 65433, 65438, 65442, 65445, 65449, 65453, 65456, 65459, 65462, 65465,
    65468, 65471, 65474, 65476, 65479, 65481, 65483, 65485, 65488, 65489, 65491,
    65493, 65495, 65497, 65498, 65500, 65501, 65503, 65504, 65505, 65507, 65508,
    65509, 65510, 65511, 65512, 65513, 65514, 65515, 65516, 65517, 65517, 65518,
    65519, 65520, 65520, 65521, 65522, 65522, 65523, 65523, 65524, 65524, 65525,
    65525, 65526, 65526, 65526, 65527, 65527, 65528, 65528, 65528, 65529, 65529,
    65529, 65529, 65530, 65530, 65530, 65530, 65531, 65531, 65531, 65531, 65531,
    65532, 65532, 65532, 65532, 65532, 65532, 65533, 65533, 65533, 65533, 65533,
    65533, 65533, 65533, 65534, 65534, 65534, 65534, 65534, 65534, 65534, 65534,
    65534, 65534, 65535};
// TODO(b/77858996): Add these to gemmlowp.
// Primary template: unimplemented on purpose — only the explicit
// specializations/overloads below are usable.
template <typename IntegerType>
IntegerType SaturatingAddNonGemmlowp(IntegerType a, IntegerType b) {
  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
  return a;
}
// int32 specialization: add in 64-bit space, then clamp into int32 range.
template <>
inline std::int32_t SaturatingAddNonGemmlowp(std::int32_t a, std::int32_t b) {
  const std::int64_t wide_sum =
      static_cast<std::int64_t>(a) + static_cast<std::int64_t>(b);
  const std::int64_t kMax = std::numeric_limits<std::int32_t>::max();
  const std::int64_t kMin = std::numeric_limits<std::int32_t>::min();
  if (wide_sum > kMax) return std::numeric_limits<std::int32_t>::max();
  if (wide_sum < kMin) return std::numeric_limits<std::int32_t>::min();
  return static_cast<std::int32_t>(wide_sum);
}
// Fixed-point overload: saturating-add the raw representations and rewrap;
// the integer-bit count is unchanged.
template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingAddNonGemmlowp(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
  const tRawType raw_sum = SaturatingAddNonGemmlowp(a.raw(), b.raw());
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(raw_sum);
}
// Primary template: unimplemented on purpose — only the explicit
// specializations/overloads below are usable.
template <typename IntegerType>
IntegerType SaturatingSub(IntegerType a, IntegerType b) {
  static_assert(std::is_same<IntegerType, void>::value, "unimplemented");
  return a;
}
// int16 specialization: subtract in 32-bit space, then clamp to int16 range.
template <>
inline std::int16_t SaturatingSub(std::int16_t a, std::int16_t b) {
  const std::int32_t wide_diff =
      static_cast<std::int32_t>(a) - static_cast<std::int32_t>(b);
  if (wide_diff > 32767) return 32767;
  if (wide_diff < -32768) return -32768;
  return static_cast<std::int16_t>(wide_diff);
}
// int32 specialization: subtract in 64-bit space, then clamp to int32 range.
template <>
inline std::int32_t SaturatingSub(std::int32_t a, std::int32_t b) {
  const std::int64_t wide_diff =
      static_cast<std::int64_t>(a) - static_cast<std::int64_t>(b);
  const std::int64_t kMax = std::numeric_limits<std::int32_t>::max();
  const std::int64_t kMin = std::numeric_limits<std::int32_t>::min();
  if (wide_diff > kMax) return std::numeric_limits<std::int32_t>::max();
  if (wide_diff < kMin) return std::numeric_limits<std::int32_t>::min();
  return static_cast<std::int32_t>(wide_diff);
}
// Fixed-point overload: saturating-subtract the raw representations and
// rewrap; the integer-bit count is unchanged.
template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits> SaturatingSub(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a,
    gemmlowp::FixedPoint<tRawType, tIntegerBits> b) {
  const tRawType raw_diff = SaturatingSub(a.raw(), b.raw());
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(raw_diff);
}
// End section to be moved to gemmlowp.
// Multiplies x by 2^exponent with saturation, elementwise for scalar or
// SIMD-like IntegerType (via gemmlowp's generic mask/select primitives).
// Values whose magnitude would overflow the left shift are clamped to the
// scalar type's min/max.
// NOTE(review): the threshold computation assumes exponent > 0 here;
// a negative exponent would over-shift — confirm callers never pass one.
template <typename IntegerType>
IntegerType SaturatingRoundingMultiplyByPOTParam(IntegerType x, int exponent) {
  if (exponent == 0) {
    return x;
  }
  using ScalarIntegerType =
      typename gemmlowp::FixedPointRawTypeTraits<IntegerType>::ScalarRawType;
  const IntegerType min =
      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::min());
  const IntegerType max =
      gemmlowp::Dup<IntegerType>(std::numeric_limits<ScalarIntegerType>::max());
  const int ScalarIntegerTypeBits = 8 * sizeof(ScalarIntegerType);
  // Largest magnitude that survives a left shift by `exponent` unscathed.
  const std::int32_t threshold =
      ((1 << (ScalarIntegerTypeBits - 1 - exponent)) - 1);
  // Lanes beyond +/- threshold saturate; the rest are shifted normally.
  const IntegerType positive_mask =
      gemmlowp::MaskIfGreaterThan(x, gemmlowp::Dup<IntegerType>(threshold));
  const IntegerType negative_mask =
      gemmlowp::MaskIfLessThan(x, gemmlowp::Dup<IntegerType>(-threshold));
  IntegerType result = gemmlowp::ShiftLeft(x, exponent);
  result = gemmlowp::SelectUsingMask(positive_mask, max, result);
  result = gemmlowp::SelectUsingMask(negative_mask, min, result);
  return result;
}
// If we want to leave IntegerBits fixed, then multiplication
// by a power of two has to be saturating/rounding, not exact anymore.
// This overload applies the raw-integer implementation and rewraps the
// result in the same fixed-point type.
template <typename tRawType, int tIntegerBits>
gemmlowp::FixedPoint<tRawType, tIntegerBits>
SaturatingRoundingMultiplyByPOTParam(
    gemmlowp::FixedPoint<tRawType, tIntegerBits> a, int exponent) {
  const tRawType scaled_raw =
      SaturatingRoundingMultiplyByPOTParam(a.raw(), exponent);
  return gemmlowp::FixedPoint<tRawType, tIntegerBits>::FromRaw(scaled_raw);
}
// Converts a non-negative int32_t multiplier to int16_t with round-to-nearest
// (dropping the low 16 bits), saturating at the int16 maximum.
inline void DownScaleInt32ToInt16Multiplier(int32_t multiplier_int32_t,
                                            int16_t* multiplier_int16_t) {
  TFLITE_DCHECK_GE(multiplier_int32_t, 0);
  static constexpr int32_t kRoundingOffset = 1 << 15;
  // Inputs whose rounding add would overflow saturate to int16 max.
  if (multiplier_int32_t >=
      std::numeric_limits<int32_t>::max() - kRoundingOffset) {
    *multiplier_int16_t = std::numeric_limits<int16_t>::max();
    return;
  }
  const int32_t rounded = (multiplier_int32_t + kRoundingOffset) >> 16;
  // The rounded value must stay within half an LSB of the original.
  TFLITE_DCHECK_LE(rounded << 16, multiplier_int32_t + kRoundingOffset);
  TFLITE_DCHECK_GT(rounded << 16, multiplier_int32_t - kRoundingOffset);
  *multiplier_int16_t = rounded;
  TFLITE_DCHECK_EQ(*multiplier_int16_t, rounded);
}
// Minimum output bits to accommodate log of maximum input range. It actually
// does not matter if one considers, say, [-64,64] or [-64,64).
//
// For example, run this through Octave:
//  [0:127; ...
//   ceil(log(abs( log(2.^(0:127))+1 ))/log(2)); ...
//   ceil(log(abs( log(2.^(0:127))+1 ))/log(2))]
// Thresholds are written in ascending order here; the mapping is identical
// to the descending-order original.
constexpr int min_log_x_output_bits(int input_bits) {
  return input_bits <= 1    ? 1
         : input_bits <= 4  ? 2
         : input_bits <= 10 ? 3
         : input_bits <= 21 ? 4
         : input_bits <= 44 ? 5
         : input_bits <= 90 ? 6
                            : 7;
}
// Although currently the name of this function says that it cannot handle
// values less than 1, in practice it can handle as low as 1/x_max, where
// x_max is the largest representable input. In other words, the output range
// is symmetric.
// Fixed-point natural log for inputs >= 1 (see the range note above).
// Normalizes the input to extract its power-of-two exponent, then evaluates
// a rational approximation of log on the normalized mantissa and combines
// the two contributions: result = z_pow_2_adj * log(2) + num/denom.
template <int OutputIntegerBits, int InputIntegerBits>
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
log_x_for_x_greater_than_or_equal_to_1_impl(
    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
  // assert(__builtin_clz(0u) >= std::numeric_limits<uint32_t>::digits - 1);
  // assert(__builtin_clz(0u) <= std::numeric_limits<uint32_t>::digits);
  using FixedPoint0 = gemmlowp::FixedPoint<int32_t, 0>;
  // The reason for accumulating the result with an extra bit of headroom is
  // that z_pow_2_adj * log_2 might be saturated, and adding num_scaled *
  // recip_denom will otherwise introduce an error.
  static constexpr int kAccumIntegerBits = OutputIntegerBits + 1;
  using FixedPointAccum = gemmlowp::FixedPoint<int32_t, kAccumIntegerBits>;
  // Compile-time-checked fixed-point constants (raw Q31 value vs double).
  const FixedPoint0 log_2 = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1488522236, std::log(2.0));
  const FixedPoint0 sqrt_sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1805811301, std::sqrt(std::sqrt(0.5)));
  const FixedPoint0 sqrt_half = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1518500250, std::sqrt(0.5));
  const FixedPoint0 one_quarter =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(FixedPoint0, 536870912, 1.0 / 4.0);
  // Coefficients of the rational approximation evaluated below.
  const FixedPoint0 alpha_n = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 117049297, 11.0 / 240.0 * std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_d = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 127690142, 1.0 / 20.0 * std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_i = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 1057819769,
      2.0 / std::sqrt(std::sqrt(2.0)) - std::sqrt(std::sqrt(2.0)));
  const FixedPoint0 alpha_f = GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(
      FixedPoint0, 638450708, 1.0 / 4.0 * std::sqrt(std::sqrt(2.0)));
  const FixedPointAccum shifted_quarter =
      gemmlowp::Rescale<kAccumIntegerBits>(one_quarter);
  // Reinterpret the input value as Q0.31, because we will figure out the
  // required shift "ourselves" instead of using, say, Rescale.
  FixedPoint0 z_a = FixedPoint0::FromRaw(input_val.raw());
  // z_a_pow_2 = input_integer_bits - z_a_headroom;
  int z_a_headroom_plus_1 = CountLeadingZeros(static_cast<uint32_t>(z_a.raw()));
  // Shift out the headroom so the mantissa occupies the top bits.
  FixedPoint0 r_a_tmp =
      SaturatingRoundingMultiplyByPOTParam(z_a, (z_a_headroom_plus_1 - 1));
  const int32_t r_a_raw =
      SaturatingRoundingMultiplyByPOTParam((r_a_tmp * sqrt_half).raw(), 1);
  // z_pow_2_adj = max(z_pow_2_a - 0.75, z_pow_2_b - 0.25);
  // z_pow_2_adj = max(InputIntegerBits - z_a_headroom_plus_1 + 0.25,
  //                   InputIntegerBits - z_b_headroom - 0.25);
  const FixedPointAccum z_a_pow_2_adj = SaturatingAddNonGemmlowp(
      FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
          static_cast<int32_t>(InputIntegerBits - z_a_headroom_plus_1),
          31 - kAccumIntegerBits)),
      shifted_quarter);
  // z_b is treated like z_a, but premultiplying by sqrt(0.5).
  FixedPoint0 z_b = z_a * sqrt_half;
  int z_b_headroom = CountLeadingZeros(static_cast<uint32_t>(z_b.raw())) - 1;
  const int32_t r_b_raw =
      SaturatingRoundingMultiplyByPOTParam(z_a.raw(), z_b_headroom);
  const FixedPointAccum z_b_pow_2_adj = SaturatingSub(
      FixedPointAccum::FromRaw(SaturatingRoundingMultiplyByPOTParam(
          static_cast<int32_t>(InputIntegerBits - z_b_headroom),
          31 - kAccumIntegerBits)),
      shifted_quarter);
  // Pick whichever normalization kept the mantissa smaller and the matching
  // power-of-two adjustment.
  const FixedPoint0 r = FixedPoint0::FromRaw(std::min(r_a_raw, r_b_raw));
  const FixedPointAccum z_pow_2_adj = FixedPointAccum::FromRaw(
      std::max(z_a_pow_2_adj.raw(), z_b_pow_2_adj.raw()));
  // Rational approximation of log on the normalized mantissa.
  const FixedPoint0 p = gemmlowp::RoundingHalfSum(r, sqrt_sqrt_half);
  FixedPoint0 q = r - sqrt_sqrt_half;
  q = q + q;
  const FixedPoint0 common_sq = q * q;
  const FixedPoint0 num = q * r + q * common_sq * alpha_n;
  const FixedPoint0 denom_minus_one_0 =
      p * (alpha_i + q + alpha_d * common_sq) + alpha_f * q;
  const FixedPoint0 recip_denom =
      one_over_one_plus_x_for_x_in_0_1(denom_minus_one_0);
  const FixedPointAccum num_scaled = gemmlowp::Rescale<kAccumIntegerBits>(num);
  // Combine the exponent contribution with the mantissa approximation.
  return gemmlowp::Rescale<OutputIntegerBits>(z_pow_2_adj * log_2 +
                                              num_scaled * recip_denom);
}
// Checked front end for log_x_for_x_greater_than_or_equal_to_1_impl: verifies
// at compile time that the output format can hold the log of any input.
template <int OutputIntegerBits, int InputIntegerBits>
inline gemmlowp::FixedPoint<int32_t, OutputIntegerBits>
log_x_for_x_greater_than_or_equal_to_1(
    gemmlowp::FixedPoint<int32_t, InputIntegerBits> input_val) {
  static_assert(
      OutputIntegerBits >= min_log_x_output_bits(InputIntegerBits),
      "Output integer bits must be sufficient to accommodate logs of inputs.");
  const auto log_value =
      log_x_for_x_greater_than_or_equal_to_1_impl<OutputIntegerBits,
                                                  InputIntegerBits>(input_val);
  return log_value;
}
inline int32_t GetReciprocal(int32_t x, int x_integer_digits,
int* num_bits_over_unit) {
int headroom_plus_one = CountLeadingZeros(static_cast<uint32_t>(x));
// This is the number of bits to the left of the binary point above 1.0.
// Consider x=1.25. In that case shifted_scale=0.8 and
// no later adjustment will be needed.
*num_bits_over_unit = x_integer_digits - headroom_plus_one;
const int32_t shifted_sum_minus_one =
static_cast<int32_t>((static_cast<uint32_t>(x) << headroom_plus_one) -
(static_cast<uint32_t>(1) << 31));
gemmlowp::FixedPoint<int32_t, 0> shifted_scale =
gemmlowp::one_over_one_plus_x_for_x_in_0_1(
gemmlowp::FixedPoint<int32_t, 0>::FromRaw(shifted_sum_minus_one));
return shifted_scale.raw();
}
// Computes a fixed-point multiplier and shift approximating 1/sqrt(input)
// via Newton-Raphson iteration. reverse_shift selects the sign convention of
// *output_shift (right shift is positive; multiply by reverse_shift at the
// end converts to left-shift convention when it is -1).
inline void GetInvSqrtQuantizedMultiplierExp(int32_t input, int reverse_shift,
                                             int32_t* output_inv_sqrt,
                                             int* output_shift) {
  TFLITE_DCHECK_GE(input, 0);
  if (input <= 1) {
    // Handle the input value 1 separately to avoid overflow in that case
    // in the general computation below (b/143972021). Also handle 0 as if it
    // were a 1. 0 is an invalid input here (divide by zero) and 1 is a valid
    // but rare/unrealistic input value. We can expect both to occur in some
    // incompletely trained models, but probably not in fully trained models.
    *output_inv_sqrt = std::numeric_limits<std::int32_t>::max();
    *output_shift = 0;
    return;
  }
  TFLITE_DCHECK_GT(input, 1);
  *output_shift = 11;
  // Scale the input below 2^29, tracking each factor of 4 as one extra
  // output shift (sqrt halves exponents, so /4 on input is /2 on output).
  while (input >= (1 << 29)) {
    input /= 4;
    ++*output_shift;
  }
  // Normalize input into [2^27, 2^29) by shifting left in pairs of bits,
  // undoing one output shift per pair.
  const unsigned max_left_shift_bits =
      CountLeadingZeros(static_cast<uint32_t>(input)) - 1;
  const unsigned max_left_shift_bit_pairs = max_left_shift_bits / 2;
  const unsigned left_shift_bit_pairs = max_left_shift_bit_pairs - 1;
  *output_shift -= left_shift_bit_pairs;
  input <<= 2 * left_shift_bit_pairs;
  TFLITE_DCHECK_GE(input, (1 << 27));
  TFLITE_DCHECK_LT(input, (1 << 29));
  using gemmlowp::FixedPoint;
  using gemmlowp::Rescale;
  using gemmlowp::SaturatingRoundingMultiplyByPOT;
  // Using 3 integer bits gives us enough room for the internal arithmetic in
  // this Newton-Raphson iteration.
  using F3 = FixedPoint<int32_t, 3>;
  using F0 = FixedPoint<int32_t, 0>;
  const F3 fixedpoint_input = F3::FromRaw(input >> 1);
  const F3 fixedpoint_half_input =
      SaturatingRoundingMultiplyByPOT<-1>(fixedpoint_input);
  const F3 fixedpoint_half_three =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F3, (1 << 28) + (1 << 27), 1.5);
  // Newton-Raphson iteration
  // Naive unoptimized starting guess: x = 1
  F3 x = F3::One();
  // Naive unoptimized number of iterations: 5
  for (int i = 0; i < 5; i++) {
    // x <- x * (1.5 - 0.5 * input * x^3), the inverse-sqrt NR update.
    const F3 x3 = Rescale<3>(x * x * x);
    x = Rescale<3>(fixedpoint_half_three * x - fixedpoint_half_input * x3);
  }
  // Final scale by sqrt(2)/2 compensates the input >> 1 above.
  const F0 fixedpoint_half_sqrt_2 =
      GEMMLOWP_CHECKED_FIXEDPOINT_CONSTANT(F0, 1518500250, std::sqrt(2.) / 2.);
  x = x * fixedpoint_half_sqrt_2;
  *output_inv_sqrt = x.raw();
  if (*output_shift < 0) {
    // Fold any residual left shift into the multiplier itself.
    *output_inv_sqrt <<= -*output_shift;
    *output_shift = 0;
  }
  // Convert right shift (right is positive) to left shift.
  *output_shift *= reverse_shift;
}
// DO NOT USE THIS STRUCT FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
// BROADCASTING.
//
// NdArrayDesc<N> describes the shape and memory layout of an N-dimensional
// rectangular array of numbers.
//
// NdArrayDesc<N> is basically identical to Dims<N> defined in types.h.
// However, as Dims<N> is to be deprecated, this class exists as an adaptor
// to enable simple unoptimized implementations of element-wise broadcasting
// operations.
// Use SubscriptToIndex() below to map subscripts to a flat element offset.
template <int N>
struct NdArrayDesc {
  // The "extent" of each dimension. Indices along dimension d must be in the
  // half-open interval [0, extents[d]).
  int extents[N];

  // The number of *elements* (not bytes) between consecutive indices of each
  // dimension. A stride of 0 marks a broadcast dimension.
  int strides[N];
};
// DO NOT USE THIS FUNCTION FOR NEW FUNCTIONALITY BEYOND IMPLEMENTING
// BROADCASTING.
//
// Same as Offset(), except takes an NdArrayDesc<4> instead of Dims<4>.
// The flat offset is the dot product of the subscripts with the strides.
inline int SubscriptToIndex(const NdArrayDesc<4>& desc, int i0, int i1, int i2,
                            int i3) {
  TFLITE_DCHECK(i0 >= 0 && i0 < desc.extents[0]);
  TFLITE_DCHECK(i1 >= 0 && i1 < desc.extents[1]);
  TFLITE_DCHECK(i2 >= 0 && i2 < desc.extents[2]);
  TFLITE_DCHECK(i3 >= 0 && i3 < desc.extents[3]);
  int flat_index = i0 * desc.strides[0];
  flat_index += i1 * desc.strides[1];
  flat_index += i2 * desc.strides[2];
  flat_index += i3 * desc.strides[3];
  return flat_index;
}
// Rank-5 variant: flat offset is the dot product of subscripts and strides.
inline int SubscriptToIndex(const NdArrayDesc<5>& desc, int indexes[5]) {
  int flat_index = 0;
  for (int dim = 0; dim < 5; ++dim) {
    flat_index += indexes[dim] * desc.strides[dim];
  }
  return flat_index;
}
// Rank-8 variant: flat offset is the dot product of subscripts and strides.
inline int SubscriptToIndex(const NdArrayDesc<8>& desc, int indexes[8]) {
  int flat_index = 0;
  for (int dim = 0; dim < 8; ++dim) {
    flat_index += indexes[dim] * desc.strides[dim];
  }
  return flat_index;
}
// Given the dimensions of the operands for an element-wise binary broadcast,
// adjusts them so that they can be directly iterated over with simple loops.
// Returns the adjusted dims as instances of NdArrayDesc in 'desc0_out' and
// 'desc1_out'. 'desc0_out' and 'desc1_out' cannot be nullptr.
//
// This function assumes that the two input shapes are compatible up to
// broadcasting and the shorter one has already been prepended with 1s to be the
// same length. E.g., if shape0 is (1, 16, 16, 64) and shape1 is (1, 64),
// shape1 must already have been prepended to be (1, 1, 1, 64). Recall that
// Dims<N> refer to shapes in reverse order. In this case, input0_dims will be
// (64, 16, 16, 1) and input1_dims will be (64, 1, 1, 1).
//
// When two shapes are compatible up to broadcasting, for each dimension d,
// the input extents are either equal, or one of them is 1.
//
// This function performs the following for each dimension d:
// - If the extents are equal, then do nothing since the loop that walks over
// both of the input arrays is correct.
// - Otherwise, one (and only one) of the extents must be 1. Say extent0 is 1
// and extent1 is e1. Then set extent0 to e1 and stride0 *to 0*. This allows
// array0 to be referenced *at any index* in dimension d and still access the
// same slice.
template <int N>
inline void NdArrayDescsForElementwiseBroadcast(const Dims<N>& input0_dims,
                                                const Dims<N>& input1_dims,
                                                NdArrayDesc<N>* desc0_out,
                                                NdArrayDesc<N>* desc1_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);
  for (int i = 0; i < N; ++i) {
    // Seed both descriptors with the raw dims.
    desc0_out->extents[i] = input0_dims.sizes[i];
    desc0_out->strides[i] = input0_dims.strides[i];
    desc1_out->extents[i] = input1_dims.sizes[i];
    desc1_out->strides[i] = input1_dims.strides[i];
    // Where extents differ, exactly one side must be 1: broadcast that side
    // by giving it the other extent and a zero stride, so any subscript in
    // this dimension maps to the same slice.
    const int extent0 = ArraySize(input0_dims, i);
    const int extent1 = ArraySize(input1_dims, i);
    if (extent0 == extent1) continue;
    if (extent0 == 1) {
      desc0_out->strides[i] = 0;
      desc0_out->extents[i] = extent1;
    } else {
      TFLITE_DCHECK_EQ(extent1, 1);
      desc1_out->strides[i] = 0;
      desc1_out->extents[i] = extent0;
    }
  }
}
// Copies the extents of input_shape into desc_out and computes row-major
// strides (innermost dimension has stride 1).
template <int N>
inline void CopyDimsToDesc(const RuntimeShape& input_shape,
                           NdArrayDesc<N>* desc_out) {
  int stride = 1;
  for (int dim = N - 1; dim >= 0; --dim) {
    const int extent = input_shape.Dims(dim);
    desc_out->extents[dim] = extent;
    desc_out->strides[dim] = stride;
    stride *= extent;
  }
}
template <int N>
inline void NdArrayDescsForElementwiseBroadcast(
    const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
    NdArrayDesc<N>* desc0_out, NdArrayDesc<N>* desc1_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);
  // Left-pad both shapes with 1s to rank N, then fill extents and strides.
  auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
  auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
  CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
  CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
  // Where extents differ, exactly one side must be 1: broadcast that side by
  // giving it the other extent and a zero stride, so any subscript in that
  // dimension maps to the same slice.
  for (int i = 0; i < N; ++i) {
    const int extent0 = extended_input0_shape.Dims(i);
    const int extent1 = extended_input1_shape.Dims(i);
    if (extent0 == extent1) continue;
    if (extent0 == 1) {
      desc0_out->strides[i] = 0;
      desc0_out->extents[i] = extent1;
    } else {
      TFLITE_DCHECK_EQ(extent1, 1);
      desc1_out->strides[i] = 0;
      desc1_out->extents[i] = extent0;
    }
  }
}
template <int N>
inline void NdArrayDescsForElementwiseBroadcast(
    const RuntimeShape& input0_shape, const RuntimeShape& input1_shape,
    const RuntimeShape& input2_shape, NdArrayDesc<N>* desc0_out,
    NdArrayDesc<N>* desc1_out, NdArrayDesc<N>* desc2_out) {
  TFLITE_DCHECK(desc0_out != nullptr);
  TFLITE_DCHECK(desc1_out != nullptr);
  TFLITE_DCHECK(desc2_out != nullptr);
  // Left-pad all shapes with 1s to rank N, then fill extents and strides.
  auto extended_input0_shape = RuntimeShape::ExtendedShape(N, input0_shape);
  auto extended_input1_shape = RuntimeShape::ExtendedShape(N, input1_shape);
  auto extended_input2_shape = RuntimeShape::ExtendedShape(N, input2_shape);
  CopyDimsToDesc<N>(extended_input0_shape, desc0_out);
  CopyDimsToDesc<N>(extended_input1_shape, desc1_out);
  CopyDimsToDesc<N>(extended_input2_shape, desc2_out);
  for (int i = 0; i < N; ++i) {
    const int extent0 = extended_input0_shape.Dims(i);
    const int extent1 = extended_input1_shape.Dims(i);
    const int extent2 = extended_input2_shape.Dims(i);
    // The broadcast extent of this dimension is the (last) non-1 extent, or
    // extent0 when all are 1.
    int broadcast_extent = extent0;
    if (extent1 != 1) broadcast_extent = extent1;
    if (extent2 != 1) broadcast_extent = extent2;
    // Broadcast compatibility: every extent is either 1 or the shared one.
    TFLITE_DCHECK(extent0 == 1 || extent0 == broadcast_extent);
    TFLITE_DCHECK(extent1 == 1 || extent1 == broadcast_extent);
    TFLITE_DCHECK(extent2 == 1 || extent2 == broadcast_extent);
    if (extent0 == extent1 && extent1 == extent2) continue;
    // Dimensions of extent 1 take the shared extent with a zero stride so
    // any subscript maps to the same slice.
    if (extent0 == 1) {
      desc0_out->strides[i] = 0;
      desc0_out->extents[i] = broadcast_extent;
    }
    if (extent1 == 1) {
      desc1_out->strides[i] = 0;
      desc1_out->extents[i] = broadcast_extent;
    }
    if (extent2 == 1) {
      desc2_out->strides[i] = 0;
      desc2_out->extents[i] = broadcast_extent;
    }
  }
}
// Detailed implementation of NDOpsHelper, the indexes must be a zero array.
// This implementation is equivalent to N nested loops. Ex, if N=4, it can be
// re-writen as:
// for (int b = 0; b < output.extents[0]; ++b) {
// for (int y = 0; y < output.extents[1]; ++y) {
// for (int x = 0; x < output.extents[2]; ++x) {
// for (int c = 0; c < output.extents[3]; ++c) {
// calc({b,y,x,c});
// }
// }
// }
// }
// Non-innermost axis: sweep this axis in place and recurse one level deeper.
template <int N, int DIM, typename Calc>
typename std::enable_if<DIM != N - 1, void>::type NDOpsHelperImpl(
    const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
  int& cursor = indexes[DIM];
  for (cursor = 0; cursor < output.extents[DIM]; ++cursor) {
    NDOpsHelperImpl<N, DIM + 1, Calc>(output, calc, indexes);
  }
}

// Innermost axis: sweep and invoke the user callback once per element.
template <int N, int DIM, typename Calc>
typename std::enable_if<DIM == N - 1, void>::type NDOpsHelperImpl(
    const NdArrayDesc<N>& output, const Calc& calc, int indexes[N]) {
  int& cursor = indexes[DIM];
  for (cursor = 0; cursor < output.extents[DIM]; ++cursor) {
    calc(indexes);
  }
}
// Execute the calc function in the innermost iteration based on the shape of
// the output. The calc function should take a single argument of type int[N].
template <int N, typename Calc>
inline void NDOpsHelper(const NdArrayDesc<N>& output, const Calc& calc) {
  // All subscripts start at zero; NDOpsHelperImpl advances them in place.
  int indexes[N] = {0};
  NDOpsHelperImpl<N, 0, Calc>(output, calc, indexes);
}
// Copied from gemmlowp::RoundDown when we dropped direct dependency on
// gemmlowp.
//
// Returns the runtime argument rounded down to the nearest multiple of
// the fixed Modulus.
// Copied from gemmlowp::RoundDown when we dropped direct dependency on
// gemmlowp: rounds i down to the nearest multiple of the fixed Modulus.
template <unsigned Modulus, typename Integer>
Integer RoundDown(Integer i) {
  const Integer remainder = i - (i % Modulus);
  return remainder;
}

// Copied from gemmlowp::RoundUp when we dropped direct dependency on
// gemmlowp: rounds i up to the nearest multiple of the fixed Modulus.
template <unsigned Modulus, typename Integer>
Integer RoundUp(Integer i) {
  const Integer bumped = i + Modulus - 1;
  return RoundDown<Modulus>(bumped);
}

// Copied from gemmlowp::CeilQuotient when we dropped direct dependency on
// gemmlowp: returns a / b rounded up ('ceil') to the nearest integer.
template <typename Integer>
Integer CeilQuotient(Integer a, Integer b) {
  return (a + b - 1) / b;
}
// This function is a copy of gemmlowp::HowManyThreads, copied when we dropped
// the direct dependency of internal/optimized/ on gemmlowp.
//
// It computes a reasonable number of threads to use for a GEMM of shape
// (rows, cols, depth): at most max_num_threads, with each thread getting at
// least KernelRows rows, and no more threads than the problem size warrants.
//
// TODO(b/131910176): get rid of this function by switching each call site
// to its own more sensible logic for its own workload.
template <int KernelRows>
inline int LegacyHowManyThreads(int max_num_threads, int rows, int cols,
                                int depth) {
  // Fast path: multi-threading disabled.
  if (max_num_threads == 1) {
    return 1;
  }
  // Give each thread at least KernelRows rows to work on, if at all possible.
  int candidate = std::min(max_num_threads, rows / KernelRows);
  if (candidate > 1) {
    // Empirically determined value.
    static constexpr std::uint64_t min_cubic_size_per_thread = 64 * 1024;
    // Computing rows*cols*depth in 64 bits avoids overflowing the product of
    // three 32-bit sizes.
    const std::uint64_t cubic_size =
        std::uint64_t(rows) * std::uint64_t(cols) * std::uint64_t(depth);
    const int size_limit =
        static_cast<int>(cubic_size / min_cubic_size_per_thread);
    candidate = std::min(candidate, size_limit);
  }
  // Never return fewer than one thread.
  if (candidate < 1) {
    candidate = 1;
  }
  assert(candidate > 0 && candidate <= max_num_threads);
  return candidate;
}
// Hints the CPU to prefetch *ptr for a read with no expected reuse
// ("streaming" locality). No-op on compilers without __builtin_prefetch.
template <typename T>
void optimized_ops_preload_l1_stream(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 0 means read */ 0, /* 0 means no locality */ 0);
#else
  (void)ptr;
#endif
}
// Hints the CPU to prefetch *ptr for a read and keep it cached (high
// temporal locality). No-op on compilers without __builtin_prefetch.
template <typename T>
void optimized_ops_preload_l1_keep(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 0 means read */ 0, /* 3 means high locality */ 3);
#else
  (void)ptr;
#endif
}
// Hints the CPU to prefetch *ptr in anticipation of a write, keeping the
// line cached (high temporal locality). No-op without __builtin_prefetch.
template <typename T>
void optimized_ops_prefetch_write_l1_keep(const T* ptr) {
#ifdef __GNUC__
  // builtin offered by GCC-compatible compilers including clang
  __builtin_prefetch(ptr, /* 1 means write */ 1, /* 3 means high locality */ 3);
#else
  (void)ptr;
#endif
}
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMMON_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/common.h | C++ | apache-2.0 | 41,969 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
#include <cstdint>
#include "tensorflow/lite/kernels/op_macros.h"
// Debug check macros. Each expands to an expression that is (void)0 when the
// condition holds and otherwise evaluates TFLITE_ASSERT_FALSE (provided by
// op_macros.h). Every macro is guarded with #ifndef so a platform can supply
// its own implementation.
#ifndef TFLITE_DCHECK
#define TFLITE_DCHECK(condition) (condition) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_EQ
#define TFLITE_DCHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_NE
#define TFLITE_DCHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_GE
#define TFLITE_DCHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_GT
#define TFLITE_DCHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_LE
#define TFLITE_DCHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
#ifndef TFLITE_DCHECK_LT
#define TFLITE_DCHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ASSERT_FALSE
#endif
// Always-on check macros: the same comparisons as the TFLITE_DCHECK_* family
// above, but a failure evaluates TFLITE_ABORT instead.
// TODO(ahentz): Clean up: We should stick to the DCHECK versions.
#ifndef TFLITE_CHECK
#define TFLITE_CHECK(condition) (condition) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_EQ
#define TFLITE_CHECK_EQ(x, y) ((x) == (y)) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_NE
#define TFLITE_CHECK_NE(x, y) ((x) != (y)) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_GE
#define TFLITE_CHECK_GE(x, y) ((x) >= (y)) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_GT
#define TFLITE_CHECK_GT(x, y) ((x) > (y)) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_LE
#define TFLITE_CHECK_LE(x, y) ((x) <= (y)) ? (void)0 : TFLITE_ABORT
#endif
#ifndef TFLITE_CHECK_LT
#define TFLITE_CHECK_LT(x, y) ((x) < (y)) ? (void)0 : TFLITE_ABORT
#endif
// Unprefixed fixed-width aliases, omitted from static-memory (micro) builds.
#ifndef TF_LITE_STATIC_MEMORY
// TODO(b/162019032): Consider removing these type-aliases.
using int8 = std::int8_t;
using uint8 = std::uint8_t;
using int16 = std::int16_t;
using uint16 = std::uint16_t;
using int32 = std::int32_t;
using uint32 = std::uint32_t;
#endif  // !defined(TF_LITE_STATIC_MEMORY)
// TFLITE_DEPRECATED()
//
// Duplicated from absl/base/macros.h to avoid pulling in that library.
// Marks a deprecated class, struct, enum, function, method and variable
// declarations. The macro argument is used as a custom diagnostic message (e.g.
// suggestion of a better alternative).
//
// Example:
//
//   class TFLITE_DEPRECATED("Use Bar instead") Foo {...};
//   TFLITE_DEPRECATED("Use Baz instead") void Bar() {...}
//
// Every usage of a deprecated entity will trigger a warning when compiled with
// clang's `-Wdeprecated-declarations` option. This option is turned off by
// default, but the warnings will be reported by clang-tidy.
#if defined(__clang__) && __cplusplus >= 201103L
#define TFLITE_DEPRECATED(message) __attribute__((deprecated(message)))
#endif
// Fallback: expands to nothing on compilers without the clang attribute.
#ifndef TFLITE_DEPRECATED
#define TFLITE_DEPRECATED(message)
#endif
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_COMPATIBILITY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/compatibility.h | C++ | apache-2.0 | 3,587 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
#include <cmath>
namespace tflite {
// Some toolchains (old Android NDKs, Arduino, Zephyr, or anything that sets
// TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) only provide the C math functions in
// the global namespace rather than in std::, so select the right prefix.
#if defined(TF_LITE_USE_GLOBAL_CMATH_FUNCTIONS) || \
    (defined(__ANDROID__) && !defined(__NDK_MAJOR__)) || defined(ARDUINO) || \
    defined(__ZEPHYR__)
#define TF_LITE_GLOBAL_STD_PREFIX
#else
#define TF_LITE_GLOBAL_STD_PREFIX std
#endif
// Generates a templated one-argument wrapper `tf_name` that forwards to
// `std_name`, qualified with the prefix selected above.
#define DECLARE_STD_GLOBAL_SWITCH1(tf_name, std_name) \
  template <class T>                                  \
  inline T tf_name(const T x) {                       \
    return TF_LITE_GLOBAL_STD_PREFIX::std_name(x);    \
  }
// TfLiteRound(x) -> round(x); TfLiteExpm1(x) -> expm1(x).
DECLARE_STD_GLOBAL_SWITCH1(TfLiteRound, round);
DECLARE_STD_GLOBAL_SWITCH1(TfLiteExpm1, expm1);
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_CPPMATH_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/cppmath.h | C++ | apache-2.0 | 1,485 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_KERNEL_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_KERNEL_UTILS_H_
#include "tensorflow/lite/c/builtin_op_data.h"
namespace tflite {
namespace kernel_utils {
// Performs an RNN batch inference step for inputs specified by input_ptr_batch.
// The RNN cell is specified by the pointers to its input and recurrent weights,
// and biases, along with the input size, number of units, activation.
//
// The pointers to the hidden state and the output are updated as a result.
//
// The pointers with the suffix "_batch" point to data aligned in batch_major
// order, and each step processes batch_size many inputs from input_ptr_batch,
// and updates batch_size many outputs and hidden states.
//
// The output_batch_dim is output.shape[-1], i.e. the outermost dimension of the
// output tensor, and in most cases will be equal to num_units. It is usually
// not when we want to store the RNN output into a slice of the output tensor,
// e.g. for bidirectional RNNs with merge_outputs. In this case, the batched
// operations cannot be used since they assume that the batched outputs are
// contiguous, and we manually loop over the batched outputs.
void RnnBatchStep(const float* input_ptr_batch, const float* input_weights_ptr,
                  const float* recurrent_weights_ptr, const float* bias_ptr,
                  int input_size, int num_units, int batch_size,
                  int output_batch_leading_dim,
                  TfLiteFusedActivation activation,
                  float* hidden_state_ptr_batch, float* output_ptr_batch);
// Same as above but includes an auxiliary input with the corresponding weights.
void RnnBatchStep(const float* input_ptr_batch, const float* input_weights_ptr,
                  const float* aux_input_ptr_batch,
                  const float* aux_input_weights_ptr,
                  const float* recurrent_weights_ptr, const float* bias_ptr,
                  int input_size, int aux_input_size, int num_units,
                  int batch_size, int output_batch_leading_dim,
                  TfLiteFusedActivation activation,
                  float* hidden_state_ptr_batch, float* output_ptr_batch);
// Performs a quantized RNN batch inference step. Same as above, but for
// quantization purposes, we also pass in quantized_hidden_state_ptr_batch and
// quantized_input_ptr_batch pointers for temporary storage of the quantized
// values of hidden_state_ptr_batch and input_ptr_batch, respectively.
// These temporary storages are expected to be preallocated to the same size as
// the respective pointers.
// An additional preallocated temporary storage 'scaling_factors' (of size
// batch_size) is used to store the scaling factors of the quantization (used
// for recovery).
// {input,recurrent}_weights_scale params are used for dequantization/recovery.
void RnnBatchStep(
    const float* input_ptr_batch, const int8_t* input_weights_ptr,
    float input_weights_scale, const int8_t* recurrent_weights_ptr,
    float recurrent_weights_scale, const float* bias_ptr, int input_size,
    int num_units, int batch_size, int output_batch_leading_dim,
    TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch,
    int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
    float* hidden_state_ptr_batch, float* output_ptr_batch,
    bool asymmetric_quantize_inputs, int32_t* zero_points,
    int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
// Same as the quantized RnnBatchStep above, but additionally takes an
// auxiliary input with its own quantized weights and scale (the quantized
// counterpart of the float aux-input overload).
void RnnBatchStep(
    const float* input_ptr_batch, const int8_t* input_weights_ptr,
    float input_weights_scale, const float* aux_input_ptr_batch,
    const int8_t* aux_input_weights_ptr, float aux_input_weights_scale,
    const int8_t* recurrent_weights_ptr, float recurrent_weights_scale,
    const float* bias_ptr, int input_size, int aux_input_size, int num_units,
    int batch_size, int output_batch_leading_dim,
    TfLiteFusedActivation activation, int8_t* quantized_input_ptr_batch,
    int8_t* aux_quantized_input_ptr_batch,
    int8_t* quantized_hidden_state_ptr_batch, float* scaling_factors,
    float* hidden_state_ptr_batch, float* output_ptr_batch,
    bool asymmetric_quantize_inputs, int32_t* zero_points,
    int32_t* accum_scratch, int32_t* row_sums, bool* compute_row_sums);
} // namespace kernel_utils
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_KERNEL_UTILS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/kernel_utils.h | C++ | apache-2.0 | 5,066 |