code stringlengths 1 1.05M | repo_name stringlengths 6 83 | path stringlengths 3 242 | language stringclasses 222
values | license stringclasses 20
values | size int64 1 1.05M |
|---|---|---|---|---|---|
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
// TODO(b/116772710): Insert legacy Dims<> code in here.
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_LEGACY_TYPES_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/legacy_types.h | C++ | apache-2.0 | 999 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
#include <cmath>
namespace tflite {
#if defined(TF_LITE_USE_GLOBAL_MAX) || defined(__ZEPHYR__)
// Single float overload used when a global max is requested (e.g. Zephyr).
// Written as a conditional expression rather than std::max because this
// header only includes <cmath>; std::max is declared in <algorithm>. The
// expression matches std::max's semantics exactly: returns `x` when neither
// argument compares less (including when `x` is NaN).
inline float TfLiteMax(const float& x, const float& y) {
  return (x < y) ? y : x;
}
#else
// Generic max via std::fmax. Note the IEEE semantics of fmax: if exactly one
// argument is NaN, the non-NaN argument is returned.
template <class T>
inline T TfLiteMax(const T& x, const T& y) {
  return std::fmax(x, y);
}
#endif
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MAX_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/max.h | C++ | apache-2.0 | 1,128 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Basic class for computing MFCCs from spectrogram slices.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
#include <vector>
#include "tensorflow/lite/kernels/internal/mfcc_dct.h"
#include "tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h"
namespace tflite {
namespace internal {
// Computes MFCC-style features from a single squared-magnitude spectrogram
// frame: a triangular mel filterbank followed by a DCT (see Compute()).
// Usage: configure via the setters, then call Initialize(), then Compute().
class Mfcc {
 public:
  Mfcc();
  // Prepares the mel filterbank and DCT for frames of `input_length`
  // spectrogram bins taken from audio at `input_sample_rate`. Returns false
  // on failure (exact validation lives in the .cc, not shown here).
  bool Initialize(int input_length, double input_sample_rate);
  // Input is a single squared-magnitude spectrogram frame. The input spectrum
  // is converted to linear magnitude and weighted into bands using a
  // triangular mel filterbank, and a discrete cosine transform (DCT) of the
  // values is taken. Output is populated with the lowest dct_coefficient_count
  // of these values.
  void Compute(const std::vector<double>& spectrogram_frame,
               std::vector<double>* output) const;
  // Sets the filterbank's upper frequency edge. Per the original
  // (commented-out) CHECK, frequency limits should be set before Initialize().
  void set_upper_frequency_limit(double upper_frequency_limit) {
    upper_frequency_limit_ = upper_frequency_limit;
  }
  // Sets the filterbank's lower frequency edge; set before Initialize().
  void set_lower_frequency_limit(double lower_frequency_limit) {
    lower_frequency_limit_ = lower_frequency_limit;
  }
  // Sets the number of mel filterbank channels; set before Initialize().
  void set_filterbank_channel_count(int filterbank_channel_count) {
    filterbank_channel_count_ = filterbank_channel_count;
  }
  // Sets how many DCT coefficients Compute() emits; set before Initialize().
  void set_dct_coefficient_count(int dct_coefficient_count) {
    dct_coefficient_count_ = dct_coefficient_count;
  }

 private:
  MfccMelFilterbank mel_filterbank_;
  MfccDct dct_;
  bool initialized_;
  double lower_frequency_limit_;   // Filterbank lower edge.
  double upper_frequency_limit_;   // Filterbank upper edge.
  int filterbank_channel_count_;   // Mel channels produced by the filterbank.
  int dct_coefficient_count_;      // Coefficients written by Compute().
};
} // namespace internal
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/mfcc.h | C++ | apache-2.0 | 2,709 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Basic minimal DCT class for MFCC speech processing.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
#include <vector>
namespace tflite {
namespace internal {
// Minimal DCT used in the MFCC pipeline: Initialize() precomputes the cosine
// basis (cosines_) for the requested sizes, Compute() then applies it.
class MfccDct {
 public:
  MfccDct();
  // Precomputes the cosine table mapping `input_length` inputs to
  // `coefficient_count` outputs. Returns false on failure (validation is in
  // the .cc, not shown here).
  bool Initialize(int input_length, int coefficient_count);
  // Applies the DCT to `input`, writing the coefficients to `output`.
  void Compute(const std::vector<double>& input,
               std::vector<double>* output) const;

 private:
  bool initialized_;
  int coefficient_count_;  // Number of output coefficients.
  int input_length_;       // Expected size of Compute()'s input.
  // Precomputed cosine basis (layout defined by the implementation).
  std::vector<std::vector<double> > cosines_;
};
} // namespace internal
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_DCT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/mfcc_dct.h | C++ | apache-2.0 | 1,325 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// Basic class for applying a mel-scale mapping to a power spectrum.
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
#include <vector>
namespace tflite {
namespace internal {
// Maps a squared-magnitude power spectrum onto triangular mel-scale bands.
class MfccMelFilterbank {
 public:
  MfccMelFilterbank();
  // Configures the filterbank; returns false on failure (validation in .cc).
  bool Initialize(int input_length,  // Number of unique FFT bins fftsize/2+1.
                  double input_sample_rate, int output_channel_count,
                  double lower_frequency_limit, double upper_frequency_limit);
  // Takes a squared-magnitude spectrogram slice as input, computes a
  // triangular-mel-weighted linear-magnitude filterbank, and places the result
  // in output.
  void Compute(const std::vector<double>& input,
               std::vector<double>* output) const;

 private:
  // Converts a frequency to its position on the mel scale.
  double FreqToMel(double freq) const;
  bool initialized_;
  int num_channels_;    // Number of mel output channels.
  double sample_rate_;  // Input audio sample rate.
  int input_length_;    // Number of FFT bins expected by Compute().
  std::vector<double> center_frequencies_;  // In mel, for each mel channel.
  // Each FFT bin b contributes to two triangular mel channels, with
  // proportion weights_[b] going into mel channel band_mapper_[b], and
  // proportion (1 - weights_[b]) going into channel band_mapper_[b] + 1.
  // Thus, weights_ contains the weighting applied to each FFT bin for the
  // upper-half of the triangular band.
  std::vector<double> weights_;  // Right-side weight for this fft bin.
  // FFT bin i contributes to the upper side of mel channel band_mapper_[i].
  std::vector<int> band_mapper_;
  int start_index_;  // Lowest FFT bin used to calculate mel spectrum.
  int end_index_;    // Highest FFT bin used to calculate mel spectrum.
};
} // namespace internal
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MFCC_MEL_FILTERBANK_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/mfcc_mel_filterbank.h | C++ | apache-2.0 | 2,472 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
#include <cmath>
namespace tflite {
#if defined(TF_LITE_USE_GLOBAL_MIN) || defined(__ZEPHYR__)
// Single float overload used when a global min is requested (e.g. Zephyr).
// Written as a conditional expression rather than std::min because this
// header only includes <cmath>; std::min is declared in <algorithm>. The
// expression matches std::min's semantics exactly: returns `x` when neither
// argument compares less (including when `x` is NaN).
inline float TfLiteMin(const float& x, const float& y) {
  return (y < x) ? y : x;
}
#else
// Generic min via std::fmin. Note the IEEE semantics of fmin: if exactly one
// argument is NaN, the non-NaN argument is returned.
template <class T>
inline T TfLiteMin(const T& x, const T& y) {
  return std::fmin(x, y);
}
#endif
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_MIN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/min.h | C++ | apache-2.0 | 1,128 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_BATCH_MATMUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_BATCH_MATMUL_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
inline void BatchMatMul(const RuntimeShape& lhs_shape, const float* lhs_data,
const RuntimeShape& rhs_shape, const float* rhs_data,
const RuntimeShape& output_shape, float* output_data,
CpuBackendContext* context) {
using ::tflite::cpu_backend_gemm::Gemm;
using ::tflite::cpu_backend_gemm::GemmParams;
using ::tflite::cpu_backend_gemm::MatrixParams;
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
// Determine which dimension is the broadcast dimension.
auto broadcast_dim = [](int lhs_dim, int rhs_dim) {
if (lhs_dim == rhs_dim) return lhs_dim;
if (lhs_dim == 1) return rhs_dim;
TFLITE_DCHECK_EQ(rhs_dim, 1);
return lhs_dim;
};
// Compute the "extent" for iterating on this dimension.
// If we are broadcasting, then don't advance (i.e return 0).
auto extent = [](const RuntimeShape& shape, int x) {
if (shape.Dims(x) == 1) {
return 0;
}
int prod = 1;
for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
prod *= shape.Dims(i);
}
return prod;
};
const int batch_dim0 =
broadcast_dim(extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 =
broadcast_dim(extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 =
broadcast_dim(extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = extent(extended_lhs_shape, 0);
const int lhs_ext1 = extent(extended_lhs_shape, 1);
const int lhs_ext2 = extent(extended_lhs_shape, 2);
const int rhs_ext0 = extent(extended_rhs_shape, 0);
const int rhs_ext1 = extent(extended_rhs_shape, 1);
const int rhs_ext2 = extent(extended_rhs_shape, 2);
// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
MatrixParams<float> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.rows = lhs_rows;
lhs_params.cols = accum_depth;
MatrixParams<float> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = accum_depth;
rhs_params.cols = rhs_cols;
MatrixParams<float> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = lhs_rows;
dst_params.cols = rhs_cols;
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const float* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const float* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const float* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const float* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const float* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const float* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
GemmParams<float, float> gemm_params;
cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2,
dst_params, out_ptr, gemm_params, context);
}
}
}
}
// Hybrid-quantized batch matmul: int8 lhs (weights) times int8 rhs with
// per-column quantization parameters for the rhs, producing float output.
// The int32 GEMM result lands in `accum_scratch`; the zero-point correction
// and per-column scale are then applied and ACCUMULATED into `output_data`
// (out += scale * (acc - row_sum * offset)). `row_sums` caches per-row sums
// of lhs used for that correction; they are (re)computed when
// `compute_row_sums` is null or points to true, and the flag is then cleared.
// Leading batch dimensions broadcast as in the float overload.
inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data,
                        const RuntimeShape& rhs_shape, const int8_t* rhs_data,
                        const float* scaling_factors,
                        const int32_t* input_offset, int32_t* row_sums,
                        const RuntimeShape& output_shape,
                        int32_t* accum_scratch, float* output_data,
                        bool* compute_row_sums, CpuBackendContext* context) {
  using ::tflite::cpu_backend_gemm::Gemm;
  using ::tflite::cpu_backend_gemm::GemmParams;
  using ::tflite::cpu_backend_gemm::MatrixParams;
  // Normalize both operands to rank 5: three batch dims + one 2-D matrix.
  const RuntimeShape extended_lhs_shape =
      RuntimeShape::ExtendedShape(5, lhs_shape);
  const RuntimeShape extended_rhs_shape =
      RuntimeShape::ExtendedShape(5, rhs_shape);
  // Determine which dimension is the broadcast dimension.
  auto broadcast_dim = [](int lhs_dim, int rhs_dim) {
    if (lhs_dim == rhs_dim) return lhs_dim;
    if (lhs_dim == 1) return rhs_dim;
    TFLITE_DCHECK_EQ(rhs_dim, 1);
    return lhs_dim;
  };
  // Compute the "extent" (element stride) for iterating on this dimension.
  // If we are broadcasting, then don't advance (i.e. return 0).
  auto extent = [](const RuntimeShape& shape, int x) {
    if (shape.Dims(x) == 1) {
      return 0;
    }
    int prod = 1;
    for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
      prod *= shape.Dims(i);
    }
    return prod;
  };
  const int batch_dim0 =
      broadcast_dim(extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
  const int batch_dim1 =
      broadcast_dim(extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
  const int batch_dim2 =
      broadcast_dim(extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
  const int lhs_ext0 = extent(extended_lhs_shape, 0);
  const int lhs_ext1 = extent(extended_lhs_shape, 1);
  const int lhs_ext2 = extent(extended_lhs_shape, 2);
  const int rhs_ext0 = extent(extended_rhs_shape, 0);
  const int rhs_ext1 = extent(extended_rhs_shape, 1);
  const int rhs_ext2 = extent(extended_rhs_shape, 2);
  // Set params for each matrix multiply.
  const int lhs_rows = extended_lhs_shape.Dims(3);
  const int rhs_cols = extended_rhs_shape.Dims(4);
  const int accum_depth = extended_lhs_shape.Dims(4);
  // Strides for the per-column quantization arrays: scaling factors and input
  // offsets follow the rhs broadcast pattern (one entry per rhs column);
  // row sums follow the lhs broadcast pattern (one entry per lhs row).
  const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols;
  const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols;
  const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols;
  const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows;
  const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows;
  const int woff_ext2 = lhs_ext2 == 0 ? 0 : lhs_rows;
  // (Re)compute the cached per-row weight sums when requested.
  if (!compute_row_sums || *compute_row_sums) {
    int num_weights_matrices = 1;
    for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) {
      num_weights_matrices *= extended_lhs_shape.Dims(i);
    }
    tensor_utils::ReductionSumVector(
        lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }
  MatrixParams<int8_t> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = lhs_rows;
  lhs_params.cols = accum_depth;
  MatrixParams<int8_t> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = accum_depth;
  rhs_params.cols = rhs_cols;
  MatrixParams<int32_t> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = lhs_rows;
  dst_params.cols = rhs_cols;
  for (int b0 = 0; b0 < batch_dim0; ++b0) {
    const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
    const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
    const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0);
    const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0);
    const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);
    for (int b1 = 0; b1 < batch_dim1; ++b1) {
      const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
      const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
      const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1);
      const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1);
      const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1);
      for (int b2 = 0; b2 < batch_dim2; ++b2) {
        const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
        const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
        const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2);
        const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2);
        const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2);
        float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
                                        b1 * batch_dim2 + b2) *
                                           lhs_rows * rhs_cols;
        // Raw int32 matmul into scratch; dequantization happens below.
        GemmParams<int32_t, int32_t> gemm_params;
        cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2,
                               dst_params, accum_scratch, gemm_params, context);
        // Zero-point correction + per-column scale, accumulated into the
        // float output: out += scale * (acc - row_sum * offset).
        for (int j = 0; j < rhs_cols; ++j) {
          const float batch_scaling_factor = scale_ptr2[j];
          const float batch_offset = static_cast<float>(ioff_ptr2[j]);
          int i = 0;
#ifdef USE_NEON
          const float32x4_t scaling_factor0 = vdupq_n_f32(batch_scaling_factor);
          const float32x4_t scaling_factor1 = vdupq_n_f32(batch_scaling_factor);
          const int32x4_t input_offset0 = vdupq_n_s32(-batch_offset);
          const int32x4_t input_offset1 = vdupq_n_s32(-batch_offset);
          // Process 8 output rows per iteration; the scalar loop below
          // handles the remainder.
          for (; i < lhs_rows - 8; i += 8) {
            // Load the row sums.
            const int32x4_t row_sum0 = vld1q_s32(woff_ptr2 + i);
            const int32x4_t row_sum1 = vld1q_s32(woff_ptr2 + i + 4);
            // Load the accumulated values.
            int idx = lhs_rows * j + i;
            const int32x4_t scratch_val0 = vld1q_s32(accum_scratch + idx);
            const int32x4_t scratch_val1 = vld1q_s32(accum_scratch + idx + 4);
            // acc - row_sum * offset, computed as acc + row_sum * (-offset).
            const int32x4_t dotprod0 =
                vmlaq_s32(scratch_val0, row_sum0, input_offset0);
            const int32x4_t dotprod1 =
                vmlaq_s32(scratch_val1, row_sum1, input_offset1);
            const float32x4_t float_val0 = vcvtq_f32_s32(dotprod0);
            const float32x4_t float_val1 = vcvtq_f32_s32(dotprod1);
            const float32x4_t result0 = vmlaq_f32(vld1q_f32(out_ptr + idx),
                                                  float_val0, scaling_factor0);
            const float32x4_t result1 = vmlaq_f32(vld1q_f32(out_ptr + idx + 4),
                                                  float_val1, scaling_factor1);
            vst1q_f32(out_ptr + idx, result0);
            vst1q_f32(out_ptr + idx + 4, result1);
          }
#endif  // USE_NEON
          // Scalar tail (and the whole range when NEON is unavailable).
          // NOTE: unlike the NEON path, this also writes the corrected value
          // back into accum_scratch.
          for (; i < lhs_rows; ++i) {
            int idx = lhs_rows * j + i;
            accum_scratch[idx] -= woff_ptr2[i] * batch_offset;
            out_ptr[idx] += batch_scaling_factor * accum_scratch[idx];
          }
        }
      }
    }
  }
}
inline void BatchMatMul(const FullyConnectedParams& params,
const RuntimeShape& lhs_shape, const int8_t* lhs_data,
const RuntimeShape& rhs_shape, const int8_t* rhs_data,
const RuntimeShape& output_shape, int8_t* output_data,
CpuBackendContext* context) {
using ::tflite::cpu_backend_gemm::Gemm;
using ::tflite::cpu_backend_gemm::GemmParams;
using ::tflite::cpu_backend_gemm::MatrixParams;
const RuntimeShape extended_lhs_shape =
RuntimeShape::ExtendedShape(5, lhs_shape);
const RuntimeShape extended_rhs_shape =
RuntimeShape::ExtendedShape(5, rhs_shape);
// Determine which dimension is the broadcast dimension.
auto broadcast_dim = [](int lhs_dim, int rhs_dim) {
if (lhs_dim == rhs_dim) return lhs_dim;
if (lhs_dim == 1) return rhs_dim;
TFLITE_DCHECK_EQ(rhs_dim, 1);
return lhs_dim;
};
// Compute the "extent" for iterating on this dimension.
// If we are broadcasting, then don't advance (i.e return 0).
auto extent = [](const RuntimeShape& shape, int x) {
if (shape.Dims(x) == 1) {
return 0;
}
int prod = 1;
for (int i = x + 1; i < shape.DimensionsCount(); ++i) {
prod *= shape.Dims(i);
}
return prod;
};
const int batch_dim0 =
broadcast_dim(extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
const int batch_dim1 =
broadcast_dim(extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
const int batch_dim2 =
broadcast_dim(extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
const int lhs_ext0 = extent(extended_lhs_shape, 0);
const int lhs_ext1 = extent(extended_lhs_shape, 1);
const int lhs_ext2 = extent(extended_lhs_shape, 2);
const int rhs_ext0 = extent(extended_rhs_shape, 0);
const int rhs_ext1 = extent(extended_rhs_shape, 1);
const int rhs_ext2 = extent(extended_rhs_shape, 2);
// Set params for each matrix multiply.
const int lhs_rows = extended_lhs_shape.Dims(3);
const int rhs_cols = extended_rhs_shape.Dims(4);
const int accum_depth = extended_lhs_shape.Dims(4);
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
MatrixParams<int8_t> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.rows = lhs_rows;
lhs_params.cols = accum_depth;
lhs_params.zero_point = -filter_offset;
MatrixParams<int8_t> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = accum_depth;
rhs_params.cols = rhs_cols;
rhs_params.zero_point = -input_offset;
MatrixParams<int8_t> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = lhs_rows;
dst_params.cols = rhs_cols;
dst_params.zero_point = output_offset;
for (int b0 = 0; b0 < batch_dim0; ++b0) {
const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
for (int b1 = 0; b1 < batch_dim1; ++b1) {
const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
for (int b2 = 0; b2 < batch_dim2; ++b2) {
const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
int8_t* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
b1 * batch_dim2 + b2) *
lhs_rows * rhs_cols;
GemmParams<int32_t, int8_t> gemm_params;
gemm_params.clamp_min = output_activation_min;
gemm_params.clamp_max = output_activation_max;
gemm_params.multiplier_fixedpoint = output_multiplier;
gemm_params.multiplier_exponent = output_shift;
cpu_backend_gemm::Gemm(lhs_params, lhs_ptr2, rhs_params, rhs_ptr2,
dst_params, out_ptr, gemm_params, context);
}
}
}
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_BATCH_MATMUL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/batch_matmul.h | C++ | apache-2.0 | 15,976 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#if defined __linux__ && defined __aarch64__
#include <sys/auxv.h>
#endif
namespace tflite {
namespace {
// The implementation of dotprod detection is copied from ruy's internal
// function DetectDotprod().
// At the moment it's only implemented on Linux ARM64. Consider syncing again
// with ruy in the future to share improvements.
#if defined __linux__ && defined __aarch64__
// Queries the Linux ELF auxiliary vector for the Asimd dotprod hardware
// capability bit.
bool DetectDotprodByLinuxAuxvMethod() {
  // Value of HWCAP_ASIMDDP in sufficiently recent Linux headers; defined
  // locally so this still builds against older headers that lack it.
  constexpr int kLocalHwcapAsimddp = 1 << 20;
  return (getauxval(AT_HWCAP) & kLocalHwcapAsimddp) != 0;
}
#endif
} // namespace
// Returns true when running on Linux/AArch64 and the kernel reports the
// dotprod extension; on every other platform this is unconditionally false.
bool DetectArmNeonDotprod() {
#if defined __linux__ && defined __aarch64__
  return DetectDotprodByLinuxAuxvMethod();
#else
  return false;
#endif
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/cpu_check.cc | C++ | apache-2.0 | 1,613 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
// This include is superfluous. However, it's been here for a while, and a
// number of files have been relying on it to include neon_check.h for them.
// This should be removed, but with a global run of presubmits to catch
// any such issues. This requires running more than just TFLite presubmits.
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
namespace tflite {
// On A64, returns true if the dotprod extension is present.
// On other architectures, returns false unconditionally.
bool DetectArmNeonDotprod();
// CPU feature flags probed at runtime (see GetCpuFlags).
struct CpuFlags {
  bool neon_dotprod = false;  // True if the ARM NEON dotprod extension is present.
};
// Fills `cpu_flags` by probing the current CPU (see DetectArmNeonDotprod).
inline void GetCpuFlags(CpuFlags* cpu_flags) {
  CpuFlags detected;
  detected.neon_dotprod = DetectArmNeonDotprod();
  *cpu_flags = detected;
}
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_CPU_CHECK_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/cpu_check.h | C++ | apache-2.0 | 1,579 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_3X3_FILTER_COMMON_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_3X3_FILTER_COMMON_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
// Size in bytes/elements of the scratch workspace used by the 3x3-filter
// depthwise-conv kernels (10 * 10 * 64 -- presumably spatial x depth;
// exact layout is defined by the kernels, not visible here).
constexpr int kDepthwiseConvScratchWorkspaceSize = 10 * 10 * 64;
// Limit used when pre-adjusting biases (semantics defined by the kernels
// that consume it; not visible here).
constexpr int kDepthwiseConvAdjustedBiasLimit = 64;
// In cases such as depth multiplication, we want to be able to load data from
// the workspace that is beyond the valid range. Macro-block sizes are adjusted
// to allow for this.
constexpr int kWorkspaceExtension = 16;
#ifdef USE_NEON
#ifndef __aarch64__
// Emulation of the AArch64-only vqtbl4q_s8 intrinsic for 32-bit ARM: looks up
// each of the 16 byte indices in `b` within the 64-byte table formed by the
// four registers of `a`. Only the 32-byte vtbl4_s8 lookup exists on 32-bit
// ARM, so the table is split into its low and high 8-byte register halves,
// bit 3 of each index (which selects between those halves) is squeezed out,
// and the two partial lookups are blended on that same bit.
inline int8x16_t vqtbl4q_s8(int8x16x4_t a, int8x16_t b) {
  // Mask is set for lanes whose index has bit 3 set (high-half bytes).
  const uint8x16_t mask = vtstq_s8(b, vdupq_n_s8(8));
  // Delete bit 3 from the indices: new_index = ((b >> 4) << 3) | (b & 7).
  const int8x16_t high_bits = vshrq_n_s8(b, 4);
  int8x16_t deleted_bit_3 = b;
  deleted_bit_3 = vsliq_n_s8(deleted_bit_3, high_bits, 3);
  int8x8x4_t repacked_data;
  // Calculate for lower indices: 32-byte table of the four low halves.
  repacked_data.val[0] = vget_low_s8(a.val[0]);
  repacked_data.val[1] = vget_low_s8(a.val[1]);
  repacked_data.val[2] = vget_low_s8(a.val[2]);
  repacked_data.val[3] = vget_low_s8(a.val[3]);
  const int8x16_t output_for_lower =
      vcombine_s8(vtbl4_s8(repacked_data, vget_low_s8(deleted_bit_3)),
                  vtbl4_s8(repacked_data, vget_high_s8(deleted_bit_3)));
  // Calculate for high indices: 32-byte table of the four high halves.
  repacked_data.val[0] = vget_high_s8(a.val[0]);
  repacked_data.val[1] = vget_high_s8(a.val[1]);
  repacked_data.val[2] = vget_high_s8(a.val[2]);
  repacked_data.val[3] = vget_high_s8(a.val[3]);
  const int8x16_t output_for_higher =
      vcombine_s8(vtbl4_s8(repacked_data, vget_low_s8(deleted_bit_3)),
                  vtbl4_s8(repacked_data, vget_high_s8(deleted_bit_3)));
  // Merge the two lookups according to bit 3 of the original indices.
  int8x16_t output = vbslq_s8(mask, output_for_higher, output_for_lower);
  return output;
}
#endif // !__aarch64__
// Convenience-compatibility functions.
// Compatibility: Intrinsics reflect a mixture of older and newer ARM
// instructions. This actually results in ZIP1 / ZIP2 asm instructions, but
// one intrinsic is provided. Also older instructions operated in place,
// and it seems more defensive to assume that some versions of intrinsics
// might reflect this
// Convenience: Callers in these kernels want both ZIP1 and ZIP2, and we do not
// want the calling code to get cluttered with unpacking int8x16x2_t.
// Interleaves *a and *b (the ZIP1/ZIP2 pair) and writes both halves back
// through the pointers, sparing callers from unpacking an int8x16x2_t.
inline void vzipq_s8_in_place(int8x16_t* a, int8x16_t* b) {
  const int8x16x2_t zipped = vzipq_s8(*a, *b);
  *a = zipped.val[0];
  *b = zipped.val[1];
}
// Same as vzipq_s8_in_place but interleaving 16-bit (2-byte) groups, done by
// reinterpreting the registers as s16 lanes.
inline void vzipq_s8x2_in_place(int8x16_t* a, int8x16_t* b) {
  const int16x8x2_t zipped =
      vzipq_s16(vreinterpretq_s16_s8(*a), vreinterpretq_s16_s8(*b));
  *a = vreinterpretq_s8_s16(zipped.val[0]);
  *b = vreinterpretq_s8_s16(zipped.val[1]);
}
// Similar rationale to the zip-in_place functions, but callers only actually
// need the TRN1 asm instruction result.
// Transposes 16-bit groups of *a and *b but keeps only the first (TRN1)
// result, written back into *a; *b is left untouched.
inline void vtrn1_s8x2_in_place(int8x16_t* a, int8x16_t* b) {
  const int16x8x2_t transposed =
      vtrnq_s16(vreinterpretq_s16_s8(*a), vreinterpretq_s16_s8(*b));
  *a = vreinterpretq_s8_s16(transposed.val[0]);
}
// Similar rationale to the zip-in_place functions, but callers only actually
// need the ZIP1 or ZIP2 asm instruction results.
// Returns only the ZIP1 (low-half interleave) result of zipping a and b.
inline int8x16_t vzip1q_s8(int8x16_t a, int8x16_t b) {
  const int8x16x2_t zipped = vzipq_s8(a, b);
  return zipped.val[0];
}
// Returns only the ZIP2 (high-half interleave) result of zipping a and b.
inline int8x16_t vzip2q_s8(int8x16_t a, int8x16_t b) {
  const int8x16x2_t zipped = vzipq_s8(a, b);
  return zipped.val[1];
}
// Shifts the register pair [*left, *right] down by one byte within each
// 32-bit lane: each lane of *left is shifted right by 8 bits and its top
// byte is refilled from the low byte of the corresponding lane of *right
// (via unsigned shift-left-insert by 24); *right is then shifted down by
// 8 bits, leaving its top bytes zero.
inline void biregister_rotate_8(int8x16_t* left, int8x16_t* right) {
  *left = vreinterpretq_s8_u32(vshrq_n_u32(vreinterpretq_u32_s8(*left), 8));
  *left = vreinterpretq_s8_u32(vsliq_n_u32(vreinterpretq_u32_s8(*left),
                                           vreinterpretq_u32_s8(*right), 24));
  *right = vreinterpretq_s8_u32(vshrq_n_u32(vreinterpretq_u32_s8(*right), 8));
}
#ifndef __aarch64__
// Polyfill for the AArch64-only vpaddq_s32 (pairwise add across two int32x4
// registers). vuzpq_s32 separates even- and odd-indexed lanes, which are then
// added lane-wise. NOTE(review): vqaddq_s32 saturates, whereas the native
// ADDP instruction wraps on overflow -- the two differ only when pair sums
// overflow int32.
inline int32x4_t vpaddq_s32(int32x4_t a, int32x4_t b) {
  int32x4x2_t deinterleaved = vuzpq_s32(a, b);
  return vqaddq_s32(deinterleaved.val[0], deinterleaved.val[1]);
}
#endif // !__aarch64__
#ifdef __ARM_FEATURE_DOTPROD
// The vdotq_lane_s32 takes int8x8t for the rhs parameter, whereas the actual
// instruction selects from between 4 32-bit (4x8-bit packed) sub-registers, an
// unusual interpretation of "lane".
// Dot-product-accumulate using the 32-bit (4x8-bit packed) sub-register of
// |rhs| selected by |lane| (0..3). vdotq_lane_s32 requires an immediate lane
// argument, so a runtime |lane| must be dispatched through this switch.
// Values of |lane| greater than 3 fall through to the lane-3 case.
inline int32x4_t vdotq_four_lane_s32(int32x4_t acc, int8x16_t lhs,
                                     int8x16_t rhs, const int lane) {
  switch (lane) {
    case 0:
      return vdotq_lane_s32(acc, lhs, vreinterpret_s32_s8(vget_low_s8(rhs)), 0);
    case 1:
      return vdotq_lane_s32(acc, lhs, vreinterpret_s32_s8(vget_low_s8(rhs)), 1);
    case 2:
      return vdotq_lane_s32(acc, lhs, vreinterpret_s32_s8(vget_high_s8(rhs)),
                            0);
    case 3:
    default:
      return vdotq_lane_s32(acc, lhs, vreinterpret_s32_s8(vget_high_s8(rhs)),
                            1);
  }
}
#else
// Emulation of the Armv8.2 SDOT instruction for targets without dot-product
// support: for each group of 4 int8 lanes, accumulates the sum of the four
// products into one int32 lane of |acc|. Implemented as widening 8x8->16
// multiplies, pairwise widening adds to 32 bits, and a final pairwise add.
inline int32x4_t vdotq_s32(int32x4_t acc, int8x16_t lhs, int8x16_t rhs) {
  int32x4_t sum0 = vpaddlq_s16(vmull_s8(vget_low_s8(lhs), vget_low_s8(rhs)));
  int32x4_t sum1 = vpaddlq_s16(vmull_s8(vget_high_s8(lhs), vget_high_s8(rhs)));
  int32x4_t sum = vpaddq_s32(sum0, sum1);
  return vaddq_s32(acc, sum);
}
// Non-dot-product emulation of vdotq_four_lane_s32 above: broadcasts the
// 32-bit (4x8-bit packed) group of |rhs| selected by |lane| and performs the
// same widening multiply-accumulate reduction as vdotq_s32.
inline int32x4_t vdotq_four_lane_s32(int32x4_t acc, int8x16_t lhs,
                                     int8x16_t rhs, int lane) {
  // Select and duplicate the requested 32-bit group of rhs into all lanes.
  int8x8_t lane_rhs;
  if (lane == 0) {
    lane_rhs = vreinterpret_s8_s32(
        vdup_lane_s32(vreinterpret_s32_s8(vget_low_s8(rhs)), 0));
  } else if (lane == 1) {
    lane_rhs = vreinterpret_s8_s32(
        vdup_lane_s32(vreinterpret_s32_s8(vget_low_s8(rhs)), 1));
  } else if (lane == 2) {
    lane_rhs = vreinterpret_s8_s32(
        vdup_lane_s32(vreinterpret_s32_s8(vget_high_s8(rhs)), 0));
  } else {
    lane_rhs = vreinterpret_s8_s32(
        vdup_lane_s32(vreinterpret_s32_s8(vget_high_s8(rhs)), 1));
  }
  // Widening multiply then pairwise reduction, as in vdotq_s32.
  int32x4_t sum0 = vpaddlq_s16(vmull_s8(vget_low_s8(lhs), lane_rhs));
  int32x4_t sum1 = vpaddlq_s16(vmull_s8(vget_high_s8(lhs), lane_rhs));
  int32x4_t sum = vpaddq_s32(sum0, sum1);
  return vaddq_s32(acc, sum);
}
#endif // !__ARM_FEATURE_DOTPROD
#endif // ARM NEON
// This structure is typically used for reducing the magnitude of outputs, and
// the historical name reflects that.
// Dispatch point for rounding right-shift ("divide by power of two")
// policies, selected by the DepthwiseConvOutputRounding template argument.
template <DepthwiseConvOutputRounding output_rounding>
struct DivideByPOT {};
// Away-from-zero rounding, delegating to RoundingDivideByPOT.
template <>
struct DivideByPOT<DepthwiseConvOutputRounding::kAwayFromZero> {
  // Divides |x| by 2^exponent with rounding away from zero.
  template <typename IntegerType>
  static inline IntegerType Run(IntegerType x, int exponent) {
    return RoundingDivideByPOT(x, exponent);
  }
  // Mult versions use the exponents directly, rather than negated.
  template <typename IntegerType>
  static inline IntegerType RunMult(IntegerType x, int exponent) {
    return RoundingDivideByPOT(x, -exponent);
  }
};
#ifdef USE_NEON
// NEON specialization for upward rounding: vqrshlq_s32 with a negative shift
// count performs a saturating, rounding shift right.
template <>
struct DivideByPOT<DepthwiseConvOutputRounding::kUpward> {
  // Divides each lane of |x| by 2^exponent.
  template <typename IntegerType>
  static inline IntegerType Run(IntegerType x, int exponent) {
    return vqrshlq_s32(x, vdupq_n_s32(static_cast<int32>(-exponent)));
  }
  // Vector-exponent variant: per-lane shift counts, applied directly
  // (positive counts shift left, negative shift right).
  template <typename IntegerType>
  static inline IntegerType RunMult(IntegerType x, IntegerType exponent) {
    return vqrshlq_s32(x, exponent);
  }
  // Scalar-exponent variant; the exponent is used directly, not negated.
  template <typename IntegerType>
  static inline IntegerType RunMult(IntegerType x, int exponent) {
    return vqrshlq_s32(x, vdupq_n_s32(static_cast<int32>(exponent)));
  }
};
#endif // ARM NEON
// See CategorizeDotProductKernel for definitive taxonomy.
enum class DotProduct3x3KernelType {
  // Parameter combination is not supported for dot product kernels.
  kNone = 0,
  // Stride-1 kernel, no depth multiplication (depth_multiplier == 1).
  kPlain,
  // Depth multiplication (input depth 1, depth_multiplier > 1), stride 1.
  kWithDepthMultiplicationStride1,
  // Depth multiplication (input depth 1, depth_multiplier > 1), stride 2.
  kWithDepthMultiplicationStride2,
  // Stride-2 kernel, no depth multiplication.
  kStride2,
};
// Selects between the two quantization schemes handled by these kernels:
// legacy non-per-channel uint8 and per-channel int8.
enum class QuantizationType {
  kNonPerChannelUint8 = 0,
  kPerChannelInt8 = 1,
};
// Maps a QuantizationType onto its external storage type and quantization
// constants.
template <QuantizationType quantization_type>
struct QuantizationTypeImpl {};
template <>
struct QuantizationTypeImpl<QuantizationType::kNonPerChannelUint8> {
  // Activation storage type as seen by callers.
  typedef uint8 ExternalType;
  // Symmetric zero point for uint8 data (128 == 0x80); kUint8SignBit is
  // presumably XORed with uint8 data to re-center it into int8 range --
  // confirm against the kernels that consume it.
  static constexpr int kIntSymmetricZeroPoint = 128;
  static constexpr uint8 kUint8SignBit = 0x80;
};
template <>
struct QuantizationTypeImpl<QuantizationType::kPerChannelInt8> {
  typedef int8 ExternalType;
  // int8 data is already symmetric: zero point 0 and no sign-bit flip.
  static constexpr int kIntSymmetricZeroPoint = 0;
  static constexpr uint8 kUint8SignBit = 0x0;
};
// Classifies whether the given depthwise-conv parameter combination can be
// handled by one of the 3x3 dot-product kernels and, if so, which variant.
// Returns kNone whenever any requirement is violated.
//
// Requirements: square stride <= 2, padding <= 1, 3x3 filter, no dilation,
// and either (input_depth % 8 == 0 with depth_multiplier == 1) or
// (input_depth == 1 with depth_multiplier > 1); weights must be symmetrically
// quantized, and per-channel int8 additionally requires an output-shift
// array.
template <
    QuantizationType quantization_type = QuantizationType::kNonPerChannelUint8>
inline DotProduct3x3KernelType CategorizeDotProductKernel(
    const RuntimeShape& input_shape, const RuntimeShape& filter_shape,
    const RuntimeShape& output_shape, const DepthwiseParams& params,
    const int32* output_shift_ptr = nullptr) {
  constexpr int kSymmetricZeroPoint =
      QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
  const int padding =
      std::max(params.padding_values.width, params.padding_values.height);
  const int stride = params.stride_width;
  const int32 input_depth = input_shape.Dims(3);
  const int32 depth_multiplier = params.depth_multiplier;
  const int32 filter_height = filter_shape.Dims(1);
  const int32 filter_width = filter_shape.Dims(2);
  // Basic geometry constraints common to all dot-product variants.
  bool supported = stride == params.stride_height && stride <= 2 &&
                   padding <= 1 && filter_width == 3 && filter_height == 3 &&
                   params.dilation_width_factor == 1 &&
                   params.dilation_height_factor == 1 &&
                   (((input_depth % 8) == 0 && depth_multiplier == 1) ||
                    (input_depth == 1 && depth_multiplier > 1));
  if (!supported) {
    return DotProduct3x3KernelType::kNone;
  }
  // Weights must be symmetrically quantized around the scheme's zero point.
  if (params.weights_offset != -kSymmetricZeroPoint) {
    return DotProduct3x3KernelType::kNone;
  }
  if (quantization_type == QuantizationType::kPerChannelInt8) {
    // Per-channel kernels read shifts from an array; it must be present.
    if (output_shift_ptr == nullptr) {
      return DotProduct3x3KernelType::kNone;
    }
  } else if (params.output_shift > 0) {
    // Uint8 kernels only support right (or zero) output shifts.
    return DotProduct3x3KernelType::kNone;
  }
  // Select the variant from depth multiplication and stride.
  if (params.depth_multiplier == 1) {
    if (stride == 1) {
      return DotProduct3x3KernelType::kPlain;
    } else if (stride == 2) {
      return DotProduct3x3KernelType::kStride2;
    } else {
      return DotProduct3x3KernelType::kNone;
    }
  } else {
    if (stride == 1) {
      return DotProduct3x3KernelType::kWithDepthMultiplicationStride1;
    } else if (stride == 2) {
      return DotProduct3x3KernelType::kWithDepthMultiplicationStride2;
    } else {
      return DotProduct3x3KernelType::kNone;
    }
  }
}
// Encapsulates constant parameters used in DepthwiseConv.
// 64-bit is used for types that will be added to 64-bit addresses in asm.
struct DepthwiseConvParams {
  // Depth/row strides are 64-bit because they are added to 64-bit addresses
  // in the asm kernels.
  int64_t input_depth;
  int64_t input_row_size;
  int64_t output_depth;
  int64_t output_row_size;
  int64_t filter_row_size;
  // Quantization parameters.
  int32 input_offset;
  int32 output_offset;
  int32 filter_offset;
  int32 output_multiplier;
  int32 output_activation_min;
  int32 output_activation_max;
  int32 output_right_shift;
  // Spatial geometry.
  int32 input_width;
  int32 input_height;
  int32 stride_width;
  int32 stride_height;
  int32 output_width;
  int32 output_height;
  // Activation clamp bounds used by the float kernels.
  float float_output_activation_min;
  float float_output_activation_max;
};
// Encapsulates constant parameters used in DepthwiseConv using dot-product ops.
// 64-bit is used for types that will be added to 64-bit addresses in asm.
//
// This structure is specifically designed for use in asm.
struct DepthwiseConvDotProdParams {
  // 64-bit: added to 64-bit addresses in asm.
  int64_t input_depth;
  int64_t output_depth;
  int32 stride;
  int32 bias_increment;
  // Quantization parameters.
  int32 input_offset;
  int32 output_offset;
  int32 output_multiplier;
  int32 output_shift;
  int32 quantized_activation_min;
  int32 quantized_activation_max;
  // Padding on each edge of the input.
  int32 padding_left;
  int32 padding_right;
  int32 padding_top;
  int32 padding_bottom;
  //
  int32 depth_micro_repeats;
  // Width tiling in "micro" / "macro" block units. NOTE(review): the exact
  // unit sizes are defined by the asm kernels that consume this struct.
  int32 width_macro_count;
  int32 input_width_overall_micro_repeats;
  int32 input_width_micro_repeats;
  int32 residual_width;
  int32 output_width_overall_micro_repeats;
  int32 output_width_micro_repeats;
  int32 output_residual_width;
  int32 workspace_width_micro_repeats;
  // Height tiling and row strides.
  int32 height_macro_count;
  int32 inbound_block_height;
  int32 outbound_block_height;
  int32 input_height_stride;
  int32 output_height_stride;
  int32 workspace_height_stride;
  //
  int32 four_over_stride;
  // Per-channel quantization arrays (used by the int8 per-channel path).
  const int32* output_multiplier_per_channel;
  const int32* output_shift_per_channel;
};
// Kernel entry points, specialized elsewhere per rounding mode, depth and
// stride. No default implementations are supplied, so every instantiation is
// a deliberate template specialization.
template <DepthwiseConvOutputRounding output_rounding, int32 kDepth,
          int32 kStrideWidth, int32 kStrideHeight>
struct DepthwiseConvWindow {};
template <DepthwiseConvOutputRounding output_rounding, int32 kDepth,
          int32 kStrideWidth, int32 kStrideHeight>
struct DepthwiseConvWindowPerChannel {};
// Which part of the output border a partial kernel handles.
enum class EdgeType { kCorner, kHorizontal, kVertical, kCenter };
// Border-handling kernels, specialized per edge type and padding amount.
template <DepthwiseConvOutputRounding output_rounding, EdgeType kEdgeType,
          int kPadWidth, int kPadHeight>
struct DepthwiseConvPartial {};
template <DepthwiseConvOutputRounding output_rounding, EdgeType kEdgeType,
          int kPadWidth, int kPadHeight>
struct DepthwiseConvPartialPerChannel {};
// Copies a subset of the input designated by |input_ptr| into |output_ptr|
// with the specified output dimensions. Supports output depths of 64 only as
// this is the cache line size.
// Copies an |output_width| x |output_height| window of the input designated
// by |input_ptr| into the densely packed |output_ptr| buffer, keeping only
// the first |output_depth| channels of each pixel. Historically used with
// output depths of 64 (one cache line of uint8).
//
// Fix: the per-pixel memcpy previously transferred |output_depth| *bytes*
// rather than |output_depth| elements, which was only correct for
// single-byte T (uint8/int8). The copy size is now scaled by sizeof(T);
// behavior is unchanged for the existing 8-bit instantiations.
template <typename T>
inline void ShuffleInput(const T* input_ptr, int64_t input_depth,
                         int32 input_width, int32 input_height,
                         int64_t output_depth, int32 output_width,
                         int32 output_height, T* output_ptr) {
  const int64_t input_row_size = input_depth * input_width;
  for (int32 y = 0; y < output_height; y++) {
    const T* ptr = input_ptr;
    for (int32 x = 0; x < output_width; x++) {
      // Copy output_depth elements (not bytes) for this pixel.
      memcpy(output_ptr, ptr, output_depth * sizeof(T));
      output_ptr += output_depth;
      ptr += input_depth;
    }
    input_ptr += input_row_size;
  }
}
// Calculates the spatial input extent required to produce |output| output
// positions at the given |stride| for a filter of size |filter_size|:
//   input = stride * (output - 1) + filter_size.
// |filter_size| defaults to 3, matching the 3x3 kernels in this file, so all
// existing two-argument callers are unaffected.
inline int32 get_shuffle_input_size(int32 stride, int32 output,
                                    int32 filter_size = 3) {
  return stride * (output - 1) + filter_size;
}
// Indicates the input and output dimensions used when shuffling input
// activations.
struct ShuffleParams {
  // Spatial extents of the shuffled output block.
  int32 output_width;
  int32 output_height;
  // Spatial extents of the input region needed to produce that output,
  // derived from the strides via get_shuffle_input_size().
  int32 input_width;
  int32 input_height;
  ShuffleParams() = default;
  ShuffleParams(int32 output_width, int32 output_height, int32 stride_width,
                int32 stride_height)
      : output_width(output_width),
        output_height(output_height),
        input_width(get_shuffle_input_size(stride_width, output_width)),
        input_height(get_shuffle_input_size(stride_height, output_height)) {}
};
// Returns true if the fast (non-dot-product) 3x3 depthwise kernels support
// this parameter combination: 3x3 filter, depth multiplier 1, equal stride
// and padding in both dimensions (stride 1 or 2, padding 0 or 1), input
// depth divisible by 8, no dilation, non-positive output shift, plus the
// boundary checks below.
//
// NOTE(review): |output_shift_ptr| is accepted but never read in this
// function; presumably kept for signature parity with per-channel callers --
// confirm before removing.
template <
    QuantizationType quantization_type = QuantizationType::kNonPerChannelUint8>
inline bool Fast3x3FilterKernelSupported(
    const RuntimeShape& input_shape, const RuntimeShape& filter_shape,
    int32 stride_width, int32 stride_height, int32 dilation_width_factor,
    int32 dilation_height_factor, int32 pad_width, int32 pad_height,
    int32 depth_multiplier, const RuntimeShape& output_shape,
    int32 output_shift, const int32* output_shift_ptr = nullptr) {
  const int32 input_height = input_shape.Dims(1);
  const int32 input_width = input_shape.Dims(2);
  const int32 input_depth = input_shape.Dims(3);
  const int32 filter_height = filter_shape.Dims(1);
  const int32 filter_width = filter_shape.Dims(2);
  const int32 output_height = output_shape.Dims(1);
  const int32 output_width = output_shape.Dims(2);
  bool supported =
      filter_width == 3 && filter_height == 3 && depth_multiplier == 1 &&
      (stride_width == 1 || stride_width == 2) &&
      (stride_height == 1 || stride_height == 2) &&
      (stride_width == stride_height) && (pad_width == 0 || pad_width == 1) &&
      (pad_height == 0 || pad_height == 1) && (pad_width == pad_height) &&
      (input_depth % 8) == 0 && (output_shift <= 0) &&
      dilation_width_factor == 1 && dilation_height_factor == 1;
  if (!supported) {
    return false;
  }
  // Handle case where padding is zero but padding type is not kValid.
  // This would require special boundary case handling that is not supported.
  const int32 out_x = output_width - 1;
  const int32 out_y = output_height - 1;
  const int32 in_x_origin = (out_x * stride_width) - pad_width;
  const int32 in_y_origin = (out_y * stride_height) - pad_height;
  const int32 in_x_end = in_x_origin + filter_width;
  const int32 in_y_end = in_y_origin + filter_height;
  // Supported only if filter on the right and bottom boundary lies completely
  // within the input if padding is zero.
  if (pad_width == 0 && pad_height == 0) {
    return in_x_end <= input_width && in_y_end <= input_height;
  }
  // Else if padding is 1, supported if bottom right filter lies +1 past input
  // width and height.
  supported = in_x_end <= (input_width + 1) && in_y_end <= (input_height + 1);
  if (!supported) {
    return false;
  }
  // Shapes with width 1 and height > 1, and vice versa are not supported yet.
  if (input_width == 1) {
    supported = (input_width == input_height);
  } else if (input_height == 1) {
    supported = (input_width == input_height);
  }
  return supported;
}
// Permute filter data, and adjust bias data to account for symmetric input
// offset. Details are provided in the implementation of the
// kUseCModel3x3DotProduct version.
//
// See the comments preceding DepthwiseConvDotProduct3x3() for further notes.
// Filter/bias pre-processing stage; specialized per implementation and
// quantization type.
template <DepthwiseConvImplementation implementation,
          QuantizationType quantization_type>
struct ProcessPerDepth {
  // Routine is contained in a static Run() method. No default template version
  // is supplied, so that all implementations are deliberate choices of template
  // specialization.
  //
  // Note that the signature of the Run() method will be designed for the asm
  // implementation rather than conforming to style.
};
// Copy a macro block of data from the input buffer into the workspace,
// permuting data within each micro block.
//
// (a) Copy a macro block of data, padding as required along the width and
// height.
// (b) Transpose the data within each micro block.
//
// See the comments preceding DepthwiseConvDotProduct3x3() for further notes.
// Input-repacking stage; specialized per implementation, quantization type,
// depth-multiplication mode and maximum padding.
template <DepthwiseConvImplementation implementation,
          QuantizationType quantization_type,
          DepthwiseConvDepthMultiplication depth_multiplication,
          int32 max_padding>
struct PackMacroBlock {
  // Routine is contained in a static Run() method. No default template version
  // is supplied, so that all implementations are deliberate choices of template
  // specialization.
  //
  // Note that the signature of the Run() method will be designed for the asm
  // implementation rather than conforming to style.
};
// Apply filter to macro block of input data and store results. Details are
// provided in the implementation of the kUseCModel3x3DotProduct version.
//
// Parameters for repeats and residual sizes are in terms of outputs.
//
// See the comments preceding DepthwiseConvDotProduct3x3() for further notes.
// Filter-application stage; specialized per implementation, quantization
// type, depth-multiplication mode and stride.
template <DepthwiseConvImplementation implementation,
          QuantizationType quantization_type,
          DepthwiseConvDepthMultiplication depth_multiplication, int32 stride>
struct KernelMacroBlock {
  // Routine is contained in a static Run() method. No default template version
  // is supplied, so that all implementations are deliberate choices of template
  // specialization.
  //
  // Note that the signature of the Run() method will be designed for the asm
  // implementation rather than conforming to style.
};
#if defined(__aarch64__)
// Experiments suggest that a modest performance improvement is seen, at least
// on 855 chipset big cores, with cache hints.
// Issues a cache-preload hint (optimized_ops_preload_l1_keep) for every pixel
// of the macro-block region described by |function_params|, so the kernel's
// subsequent loads are more likely to hit L1.
template <typename T>
inline void PreloadInputBlock(
    const T* input_block_data,
    const DepthwiseConvDotProdParams* function_params) {
  // Preload.
  const int input_width_micro_repeats =
      function_params->input_width_micro_repeats;
  const int block_height = function_params->inbound_block_height;
  const int residual_width = function_params->residual_width;
  const int input_height_stride = function_params->input_height_stride;
  const int input_depth = function_params->input_depth;
  // Each width micro-repeat covers 4 pixels, plus a residual tail.
  const int total_width = 4 * input_width_micro_repeats + residual_width;
  const T* row_ptr = input_block_data;
  for (int k_height = 0; k_height < block_height; ++k_height) {
    const T* ptr = row_ptr;
    for (int j = 0; j < total_width; ++j) {
      // Input data is loaded once.
      optimized_ops_preload_l1_keep(ptr);
      ptr += input_depth;
    }
    row_ptr += input_height_stride;
  }
}
#endif // __aarch64__
} // namespace depthwise_conv
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_3X3_FILTER_COMMON_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h | C++ | apache-2.0 | 21,607 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_FLOAT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_FLOAT_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
// Implementation of float DepthwiseConv
// Primary template, specialized below for particular (strided-allowed,
// fixed-input-depth, fixed-depth-multiplier) combinations.
// kFixedInputDepth == 0 means the input depth is a runtime value.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
struct FloatDepthwiseConvKernel {};
#ifdef USE_NEON
// Non-strided, input depth 8, depth multiplier 1. Accumulates
// filter * input into acc_buffer, two output pixels per iteration where
// possible. The input_depth/depth_multiplier/input_ptr_increment parameters
// are fixed by this specialization and unused.
template <>
struct FloatDepthwiseConvKernel<false, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vld1q_f32(filter_ptr + 4 * i);
    }
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the inputs
      float32x4_t input[4];
      for (int i = 0; i < 4; i++) {
        input[i] = vld1q_f32(input_ptr + 4 * i);
      }
      input_ptr += 16;
      // Load the accumulators from acc_buffer
      float32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate: acc[0..1] is pixel 0, acc[2..3] is pixel 1,
      // both against the same 8-channel filter.
      acc[0] = vmlaq_f32(acc[0], input[0], filter[0]);
      acc[1] = vmlaq_f32(acc[1], input[1], filter[1]);
      acc[2] = vmlaq_f32(acc[2], input[2], filter[0]);
      acc[3] = vmlaq_f32(acc[3], input[3], filter[1]);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the inputs
      float32x4_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vld1q_f32(input_ptr + 4 * i);
      }
      input_ptr += 8;
      // Load the accumulators from acc_buffer
      float32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[i] = vmlaq_f32(acc[i], input[i], filter[i]);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
// Non-strided, input depth 2, depth multiplier 1. The 2-channel filter is
// duplicated across a 4-lane register so 2 pixels can be processed per
// vector op; tail loops handle 4/2/1-pixel remainders.
template <>
struct FloatDepthwiseConvKernel<false, 2, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    const float32x2_t filters = vld1_f32(filter_ptr);
    const float32x4_t filters_dup2 = vcombine_f32(filters, filters);
    int outp = 0;
    // Handle 8 output pixels at a time.
    for (; outp <= num_output_pixels - 8; outp += 8) {
      // Load the inputs
      float32x4_t input[4];
      for (int i = 0; i < 4; i++) {
        input[i] = vld1q_f32(input_ptr + 4 * i);
      }
      input_ptr += 16;
      // Load the accumulators from acc_buffer
      float32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      for (int i = 0; i < 4; i++) {
        acc[i] = vmlaq_f32(acc[i], input[i], filters_dup2);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the inputs
      float32x4_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vld1q_f32(input_ptr + 4 * i);
      }
      input_ptr += 8;
      // Load the accumulators from acc_buffer
      float32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[i] = vmlaq_f32(acc[i], input[i], filters_dup2);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the inputs
      const float32x4_t input = vld1q_f32(input_ptr);
      input_ptr += 4;
      // Load the accumulators from acc_buffer
      float32x4_t acc = vld1q_f32(acc_buffer_ptr);
      // Multiply-accumulate
      acc = vmlaq_f32(acc, input, filters_dup2);
      // Store the accumulators back to acc_buffer
      vst1q_f32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle 1 output pixel at a time
    for (; outp < num_output_pixels; outp++) {
      // Load the inputs
      const float32x2_t input = vld1_f32(input_ptr);
      input_ptr += 2;
      // Load the accumulators from acc_buffer
      float32x2_t acc = vld1_f32(acc_buffer_ptr);
      // Multiply-accumulate
      acc = vmla_f32(acc, input, filters);
      // Store the accumulators back to acc_buffer
      vst1_f32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
// Strided, runtime input depth, depth multiplier 1. Walks channels in
// 16/4/1 chunks per output pixel; input_ptr_increment advances between
// output pixels (to honor the stride).
template <>
struct FloatDepthwiseConvKernel<true, 0, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const float* local_filter_ptr = filter_ptr;
      const float* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 16 input channels at a time.
      for (; ic <= input_depth - 16; ic += 16) {
        // Load the filters
        float32x4_t filter_0 = vld1q_f32(local_filter_ptr + 4 * 0);
        float32x4_t filter_1 = vld1q_f32(local_filter_ptr + 4 * 1);
        float32x4_t filter_2 = vld1q_f32(local_filter_ptr + 4 * 2);
        float32x4_t filter_3 = vld1q_f32(local_filter_ptr + 4 * 3);
        local_filter_ptr += 16;
        // Load the inputs
        float32x4_t input_0 = vld1q_f32(local_input_ptr + 4 * 0);
        float32x4_t input_1 = vld1q_f32(local_input_ptr + 4 * 1);
        float32x4_t input_2 = vld1q_f32(local_input_ptr + 4 * 2);
        float32x4_t input_3 = vld1q_f32(local_input_ptr + 4 * 3);
        local_input_ptr += 16;
        // Load the accumulators from acc_buffer
        float32x4_t acc_0 = vld1q_f32(acc_buffer_ptr + 4 * 0);
        float32x4_t acc_1 = vld1q_f32(acc_buffer_ptr + 4 * 1);
        float32x4_t acc_2 = vld1q_f32(acc_buffer_ptr + 4 * 2);
        float32x4_t acc_3 = vld1q_f32(acc_buffer_ptr + 4 * 3);
        // Multiply-accumulate
        acc_0 = vmlaq_f32(acc_0, input_0, filter_0);
        acc_1 = vmlaq_f32(acc_1, input_1, filter_1);
        acc_2 = vmlaq_f32(acc_2, input_2, filter_2);
        acc_3 = vmlaq_f32(acc_3, input_3, filter_3);
        // Store the accumulators back to acc_buffer
        vst1q_f32(acc_buffer_ptr + 4 * 0, acc_0);
        vst1q_f32(acc_buffer_ptr + 4 * 1, acc_1);
        vst1q_f32(acc_buffer_ptr + 4 * 2, acc_2);
        vst1q_f32(acc_buffer_ptr + 4 * 3, acc_3);
        acc_buffer_ptr += 16;
      }
      // Handle 4 input channels at a time.
      for (; ic <= input_depth - 4; ic += 4) {
        // Load the filters
        float32x4_t filter;
        filter = vld1q_f32(local_filter_ptr);
        local_filter_ptr += 4;
        // Load the inputs
        float32x4_t input;
        input = vld1q_f32(local_input_ptr);
        local_input_ptr += 4;
        // Load the accumulators from acc_buffer
        float32x4_t acc;
        acc = vld1q_f32(acc_buffer_ptr);
        // Multiply-accumulate
        acc = vmlaq_f32(acc, input, filter);
        // Store the accumulators back to acc_buffer
        vst1q_f32(acc_buffer_ptr, acc);
        acc_buffer_ptr += 4;
      }
      // Handle one input channel at a time.
      for (; ic < input_depth; ic++) {
        const float input_val = *local_input_ptr++;
        const float filter_val = *local_filter_ptr++;
        *acc_buffer_ptr++ += filter_val * input_val;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Strided, runtime input depth, depth multiplier 8: each input channel
// produces 8 outputs (two 4-lane registers), so each input scalar is
// broadcast against 8 filter values.
template <>
struct FloatDepthwiseConvKernel<true, 0, 8> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const float* local_filter_ptr = filter_ptr;
      const float* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 2 input channels at a time.
      for (; ic <= input_depth - 2; ic += 2) {
        // Load the filters
        float32x4_t filter[4];
        for (int i = 0; i < 4; i++) {
          filter[i] = vld1q_f32(local_filter_ptr + 4 * i);
        }
        local_filter_ptr += 16;
        // Load the inputs
        const float32x2_t input = vld1_f32(local_input_ptr);
        local_input_ptr += 2;
        // Load the accumulators from acc_buffer
        float32x4_t acc[4];
        for (int i = 0; i < 4; i++) {
          acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate: lane 0 of input drives the first channel's
        // 8 outputs, lane 1 the second channel's.
        acc[0] = vmlaq_lane_f32(acc[0], filter[0], input, 0);
        acc[1] = vmlaq_lane_f32(acc[1], filter[1], input, 0);
        acc[2] = vmlaq_lane_f32(acc[2], filter[2], input, 1);
        acc[3] = vmlaq_lane_f32(acc[3], filter[3], input, 1);
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 4; i++) {
          vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 16;
      }
      // Handle one input channel at a time.
      for (; ic < input_depth; ic++) {
        // Load the filters
        float32x4_t filter[2];
        for (int i = 0; i < 2; i++) {
          filter[i] = vld1q_f32(local_filter_ptr + 4 * i);
        }
        local_filter_ptr += 8;
        // Load the inputs
        const float input_val = *local_input_ptr++;
        // Load the accumulators from acc_buffer
        float32x4_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate
        for (int i = 0; i < 2; i++) {
          acc[i] = vmlaq_n_f32(acc[i], filter[i], input_val);
        }
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 8;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Note this implementation is very slow for input_depths < 8
// (e.g. comparable to reference implementation) see, specializations for
// input_depth=3 below.
// Strided, runtime input depth, depth multiplier 2: each input channel
// produces 2 outputs. Input values are duplicated (zipped with themselves)
// so channel pairs line up with their 2 filter values. Walks channels in
// 8/4/2/1 chunks per output pixel.
template <>
struct FloatDepthwiseConvKernel<true, 0, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const float* local_filter_ptr = filter_ptr;
      const float* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 8 input channels at a time.
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters
        float32x4_t filter[4];
        for (int i = 0; i < 4; i++) {
          filter[i] = vld1q_f32(local_filter_ptr + 4 * i);
        }
        local_filter_ptr += 16;
        // Load the inputs, duplicating each value so that each channel
        // appears twice (once per output of the depth multiplier).
        float32x4x2_t input_dup2[2];
        for (int i = 0; i < 2; i++) {
          const float32x4_t input = vld1q_f32(local_input_ptr + 4 * i);
          input_dup2[i] = vzipq_f32(input, input);
        }
        local_input_ptr += 8;
        // Load the accumulators from acc_buffer
        float32x4_t acc[4];
        for (int i = 0; i < 4; i++) {
          acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate
        acc[0] = vmlaq_f32(acc[0], filter[0], input_dup2[0].val[0]);
        acc[1] = vmlaq_f32(acc[1], filter[1], input_dup2[0].val[1]);
        acc[2] = vmlaq_f32(acc[2], filter[2], input_dup2[1].val[0]);
        acc[3] = vmlaq_f32(acc[3], filter[3], input_dup2[1].val[1]);
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 4; i++) {
          vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 16;
      }
      // Handle 4 input channels at a time.
      for (; ic <= input_depth - 4; ic += 4) {
        // Load the filters
        float32x2_t filter[4];
        for (int i = 0; i < 4; i++) {
          filter[i] = vld1_f32(local_filter_ptr + 2 * i);
        }
        local_filter_ptr += 8;
        // Load the inputs
        const float32x4_t input = vld1q_f32(local_input_ptr);
        local_input_ptr += 4;
        // Load the accumulators from acc_buffer
        float32x2_t acc[4];
        for (int i = 0; i < 4; i++) {
          acc[i] = vld1_f32(acc_buffer_ptr + 2 * i);
        }
        // Multiply-accumulate: one input lane per 2-output filter pair.
        acc[0] = vmla_lane_f32(acc[0], filter[0], vget_low_f32(input), 0);
        acc[1] = vmla_lane_f32(acc[1], filter[1], vget_low_f32(input), 1);
        acc[2] = vmla_lane_f32(acc[2], filter[2], vget_high_f32(input), 0);
        acc[3] = vmla_lane_f32(acc[3], filter[3], vget_high_f32(input), 1);
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 4; i++) {
          vst1_f32(acc_buffer_ptr + 2 * i, acc[i]);
        }
        acc_buffer_ptr += 8;
      }
      // Handle 2 input channels at a time.
      for (; ic <= input_depth - 2; ic += 2) {
        // Load the filters
        const float32x4_t filter = vld1q_f32(local_filter_ptr);
        local_filter_ptr += 4;
        // Load the inputs
        const float32x2_t input = vld1_f32(local_input_ptr);
        local_input_ptr += 2;
        // Load the accumulators from acc_buffer
        float32x2_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i] = vld1_f32(acc_buffer_ptr + 2 * i);
        }
        // Multiply-accumulate
        acc[0] = vmla_lane_f32(acc[0], vget_low_f32(filter), input, 0);
        acc[1] = vmla_lane_f32(acc[1], vget_high_f32(filter), input, 1);
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1_f32(acc_buffer_ptr + 2 * i, acc[i]);
        }
        acc_buffer_ptr += 4;
      }
      // Handle one input channel at a time.
      for (; ic < input_depth; ic++) {
        // Load the inputs
        const float input_val = *local_input_ptr++;
        // Multiply-accumulate
        for (int i = 0; i < 2; i++) {
          acc_buffer_ptr[i] += local_filter_ptr[i] * input_val;
        }
        local_filter_ptr += 2;
        acc_buffer_ptr += 2;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Strided, input depth 3, depth multiplier 2: 6 outputs per pixel, held in
// three 2-lane accumulators (one per input channel).
template <>
struct FloatDepthwiseConvKernel<true, 3, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x2_t filter[3];
    for (int i = 0; i < 3; i++) {
      filter[i] = vld1_f32(filter_ptr + 2 * i);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load channels 0-1 as a pair, channel 2 broadcast to both lanes.
      const float32x2_t input01 = vld1_f32(input_ptr);
      const float32x2_t input2 = vld1_dup_f32(input_ptr + 2);
      // Load the accumulators from acc_buffer
      float32x2_t acc[3];
      for (int i = 0; i < 3; i++) {
        acc[i] = vld1_f32(acc_buffer_ptr + 2 * i);
      }
      // Multiply-accumulate for each input channel there 2 outputs
      acc[0] = vmla_lane_f32(acc[0], filter[0], input01, 0);
      acc[1] = vmla_lane_f32(acc[1], filter[1], input01, 1);
      acc[2] = vmla_lane_f32(acc[2], filter[2], input2, 0);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 3; i++) {
        vst1_f32(acc_buffer_ptr + 2 * i, acc[i]);
      }
      acc_buffer_ptr += 6;
      input_ptr += input_ptr_increment;
    }
  }
};
// Strided, input depth 3, depth multiplier 4: 12 outputs per pixel, held in
// three 4-lane accumulators (one per input channel).
template <>
struct FloatDepthwiseConvKernel<true, 3, 4> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter[3];
    for (int i = 0; i < 3; i++) {
      filter[i] = vld1q_f32(filter_ptr + 4 * i);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // NOTE: we only want 3 values, so we read it as two ops where
      // the second op just duplicates the lane
      const float32x2_t input01 = vld1_f32(input_ptr);
      const float32x2_t input2 = vld1_dup_f32(input_ptr + 2);
      // Load the accumulators from acc_buffer
      float32x4_t acc[3];
      for (int i = 0; i < 3; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate all outputs.
      acc[0] = vmlaq_lane_f32(acc[0], filter[0], input01, 0);
      acc[1] = vmlaq_lane_f32(acc[1], filter[1], input01, 1);
      acc[2] = vmlaq_lane_f32(acc[2], filter[2], input2, 0);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 3; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 12;
      input_ptr += input_ptr_increment;
    }
  }
};
// Strided, input depth 1, depth multiplier 8: the single input scalar is
// broadcast against 8 filter values (two 4-lane registers) per pixel.
template <>
struct FloatDepthwiseConvKernel<true, 1, 8> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vld1q_f32(filter_ptr + 4 * i);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs
      const float input_val = *input_ptr;
      input_ptr += input_ptr_increment;
      // Load the accumulators from acc_buffer
      float32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[i] = vmlaq_n_f32(acc[i], filter[i], input_val);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: strided access allowed, input depth fixed at 1, depth
// multiplier 32. Fully unrolled into individually-named q-registers
// (instead of arrays) so the compiler keeps all 8 filter vectors and 8
// accumulators in registers.
template <>
struct FloatDepthwiseConvKernel<true, 1, 32> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter_0 = vld1q_f32(filter_ptr + 4 * 0);
    float32x4_t filter_1 = vld1q_f32(filter_ptr + 4 * 1);
    float32x4_t filter_2 = vld1q_f32(filter_ptr + 4 * 2);
    float32x4_t filter_3 = vld1q_f32(filter_ptr + 4 * 3);
    float32x4_t filter_4 = vld1q_f32(filter_ptr + 4 * 4);
    float32x4_t filter_5 = vld1q_f32(filter_ptr + 4 * 5);
    float32x4_t filter_6 = vld1q_f32(filter_ptr + 4 * 6);
    float32x4_t filter_7 = vld1q_f32(filter_ptr + 4 * 7);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs
      const float input_val = *input_ptr;
      input_ptr += input_ptr_increment;
      // Load the accumulators from acc_buffer
      float32x4_t acc_0 = vld1q_f32(acc_buffer_ptr + 4 * 0);
      float32x4_t acc_1 = vld1q_f32(acc_buffer_ptr + 4 * 1);
      float32x4_t acc_2 = vld1q_f32(acc_buffer_ptr + 4 * 2);
      float32x4_t acc_3 = vld1q_f32(acc_buffer_ptr + 4 * 3);
      float32x4_t acc_4 = vld1q_f32(acc_buffer_ptr + 4 * 4);
      float32x4_t acc_5 = vld1q_f32(acc_buffer_ptr + 4 * 5);
      float32x4_t acc_6 = vld1q_f32(acc_buffer_ptr + 4 * 6);
      float32x4_t acc_7 = vld1q_f32(acc_buffer_ptr + 4 * 7);
      // Multiply-accumulate: the single input value broadcast against all 32
      // filter taps.
      acc_0 = vmlaq_n_f32(acc_0, filter_0, input_val);
      acc_1 = vmlaq_n_f32(acc_1, filter_1, input_val);
      acc_2 = vmlaq_n_f32(acc_2, filter_2, input_val);
      acc_3 = vmlaq_n_f32(acc_3, filter_3, input_val);
      acc_4 = vmlaq_n_f32(acc_4, filter_4, input_val);
      acc_5 = vmlaq_n_f32(acc_5, filter_5, input_val);
      acc_6 = vmlaq_n_f32(acc_6, filter_6, input_val);
      acc_7 = vmlaq_n_f32(acc_7, filter_7, input_val);
      // Store the accumulators back to acc_buffer
      vst1q_f32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_f32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_f32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_f32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_f32(acc_buffer_ptr + 4 * 4, acc_4);
      vst1q_f32(acc_buffer_ptr + 4 * 5, acc_5);
      vst1q_f32(acc_buffer_ptr + 4 * 6, acc_6);
      vst1q_f32(acc_buffer_ptr + 4 * 7, acc_7);
      acc_buffer_ptr += 32;
    }
  }
};
// Specialization: strided access allowed, input depth fixed at 1, depth
// multiplier 20 — five q-registers of filter taps per output pixel, fully
// unrolled like the 1x32 kernel above.
template <>
struct FloatDepthwiseConvKernel<true, 1, 20> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter_0 = vld1q_f32(filter_ptr + 4 * 0);
    float32x4_t filter_1 = vld1q_f32(filter_ptr + 4 * 1);
    float32x4_t filter_2 = vld1q_f32(filter_ptr + 4 * 2);
    float32x4_t filter_3 = vld1q_f32(filter_ptr + 4 * 3);
    float32x4_t filter_4 = vld1q_f32(filter_ptr + 4 * 4);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs
      const float input_val = *input_ptr;
      input_ptr += input_ptr_increment;
      // Load the accumulators from acc_buffer
      float32x4_t acc_0 = vld1q_f32(acc_buffer_ptr + 4 * 0);
      float32x4_t acc_1 = vld1q_f32(acc_buffer_ptr + 4 * 1);
      float32x4_t acc_2 = vld1q_f32(acc_buffer_ptr + 4 * 2);
      float32x4_t acc_3 = vld1q_f32(acc_buffer_ptr + 4 * 3);
      float32x4_t acc_4 = vld1q_f32(acc_buffer_ptr + 4 * 4);
      // Multiply-accumulate: the single input value broadcast against all 20
      // filter taps.
      acc_0 = vmlaq_n_f32(acc_0, filter_0, input_val);
      acc_1 = vmlaq_n_f32(acc_1, filter_1, input_val);
      acc_2 = vmlaq_n_f32(acc_2, filter_2, input_val);
      acc_3 = vmlaq_n_f32(acc_3, filter_3, input_val);
      acc_4 = vmlaq_n_f32(acc_4, filter_4, input_val);
      // Store the accumulators back to acc_buffer
      vst1q_f32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_f32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_f32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_f32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_f32(acc_buffer_ptr + 4 * 4, acc_4);
      acc_buffer_ptr += 20;
    }
  }
};
// Specialization: strided access allowed, runtime-variable input depth
// (kFixedInputDepth == 0), depth multiplier fixed at 16. The filter is
// re-loaded per input channel since the taps differ for each channel.
template <>
struct FloatDepthwiseConvKernel<true, 0, 16> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const float* local_filter_ptr = filter_ptr;
      const float* local_input_ptr = input_ptr;
      // Walk the input channels; each contributes 16 output channels.
      for (int ic = 0; ic < input_depth; ic++) {
        // Load the filters
        float32x4_t filter[4];
        for (int i = 0; i < 4; i++) {
          filter[i] = vld1q_f32(local_filter_ptr + 4 * i);
        }
        local_filter_ptr += 16;
        // Load the inputs
        const float input_val = *local_input_ptr++;
        // Load the accumulators from acc_buffer
        float32x4_t acc[4];
        for (int i = 0; i < 4; i++) {
          acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate
        for (int i = 0; i < 4; i++) {
          acc[i] = vmlaq_n_f32(acc[i], filter[i], input_val);
        }
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 4; i++) {
          vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 16;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided access allowed, input depth fixed at 8, depth
// multiplier 1 — an elementwise 8-wide multiply-accumulate per output pixel.
template <>
struct FloatDepthwiseConvKernel<true, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    // Load the filters
    float32x4_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vld1q_f32(filter_ptr + 4 * i);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs
      float32x4_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vld1q_f32(input_ptr + 4 * i);
      }
      // Load the accumulators from acc_buffer
      float32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_f32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate (lane-for-lane, since depth_multiplier == 1)
      for (int i = 0; i < 2; i++) {
        acc[i] = vmlaq_f32(acc[i], input[i], filter[i]);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_f32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided access allowed, input depth fixed at 2, depth
// multiplier 1. Processes two output pixels per iteration by packing them
// into one q-register, then finishes any odd pixel with d-register ops.
template <>
struct FloatDepthwiseConvKernel<true, 2, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    float32x2_t filter = vld1_f32(filter_ptr);
    // Duplicate the 2 filter taps into both halves of a q-register so one
    // vmlaq covers two output pixels.
    float32x4_t filter_x4 = vcombine_f32(filter, filter);
    int outp = 0;
    // Handle two output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the inputs
      float32x2_t input_1 = vld1_f32(input_ptr);
      input_ptr += input_ptr_increment;
      float32x2_t input_2 = vld1_f32(input_ptr);
      input_ptr += input_ptr_increment;
      float32x4_t input = vcombine_f32(input_1, input_2);
      // Load the accumulators from acc_buffer
      float32x4_t acc = vld1q_f32(acc_buffer_ptr);
      // Multiply-accumulate
      acc = vmlaq_f32(acc, input, filter_x4);
      // Store the accumulators back to acc_buffer
      vst1q_f32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the inputs
      float32x2_t input = vld1_f32(input_ptr);
      input_ptr += input_ptr_increment;
      // Load the accumulators from acc_buffer
      float32x2_t acc = vld1_f32(acc_buffer_ptr);
      // Multiply-accumulate
      acc = vmla_f32(acc, input, filter);
      // Store the accumulators back to acc_buffer
      vst1_f32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
// Specialization: strided access allowed, input depth fixed at 4, depth
// multiplier 1 — one 4-wide elementwise multiply-accumulate per output pixel.
template <>
struct FloatDepthwiseConvKernel<true, 4, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const float* input_ptr, int input_ptr_increment,
                  const float* filter_ptr, float* acc_buffer_ptr) {
    float32x4_t filter = vld1q_f32(filter_ptr);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs
      float32x4_t input = vld1q_f32(input_ptr);
      // Load the accumulators from acc_buffer
      float32x4_t acc = vld1q_f32(acc_buffer_ptr);
      // Multiply-accumulate
      acc = vmlaq_f32(acc, input, filter);
      // Store the accumulators back to acc_buffer
      vst1q_f32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
      input_ptr += input_ptr_increment;
    }
  }
};
#endif
// Accumulates the effect of one row of the filter, on a segment of one row
// of the output, accessing the corresponding one row of the input.
//
// Template parameters select a FloatDepthwiseConvKernel specialization:
// nonzero kFixedInputDepth / kFixedDepthMultiplier must equal the runtime
// input_depth / depth_multiplier (DCHECKed below), and kAllowStrided must be
// true whenever stride != 1.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
void FloatDepthwiseConvAccumRow(int stride, int dilation_factor,
                                int input_depth, int input_width,
                                const float* input_data, int pad_width,
                                int depth_multiplier, int filter_width,
                                const float* filter_data,
                                int out_x_buffer_start, int out_x_buffer_end,
                                int output_depth, float* acc_buffer) {
  ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
  // Consistency check parameters. This is important in particular to ensure
  // that we keep the number of template instantiations minimal, so we don't
  // increase binary size unnecessarily.
  static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
  static_assert(kFixedInputDepth || kAllowStrided, "");
  TFLITE_DCHECK(stride == 1 || kAllowStrided);
  if (kFixedInputDepth) {
    TFLITE_DCHECK_EQ(input_depth, kFixedInputDepth);
  }
  if (kFixedDepthMultiplier) {
    TFLITE_DCHECK_EQ(depth_multiplier, kFixedDepthMultiplier);
  }
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  const int input_ptr_increment = stride * input_depth;
  const float* filter_base_ptr = filter_data;
  for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
    // For the current (filter_x, filter_y) point in the filter,
    // compute the boundaries of the corresponding output row segment.
    int out_x_loop_start_unclamped = 0;
    int out_x_loop_end_unclamped = 0;
    if (kAllowStrided) {
      // Specialize the bound computation for strides 2 and 4 so the
      // divisions become shifts; fall back to a generic division otherwise.
      if (stride == 2) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 1) / 2;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 1) / 2;
      } else if (stride == 4) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 3) / 4;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 3) / 4;
      } else {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + stride - 1) / stride;
        out_x_loop_end_unclamped = (pad_width + input_width -
                                    dilation_factor * filter_x + stride - 1) /
                                   stride;
      }
    } else {
      out_x_loop_start_unclamped = pad_width - dilation_factor * filter_x;
      out_x_loop_end_unclamped =
          pad_width + input_width - dilation_factor * filter_x;
    }
    // The kernel will have to iterate on the segment of the
    // output row that starts at out_x_loop_start and out_x_loop_end.
    const int out_x_loop_start =
        std::max(out_x_buffer_start, out_x_loop_start_unclamped);
    const int out_x_loop_end =
        std::min(out_x_buffer_end, out_x_loop_end_unclamped);
    float* acc_buffer_ptr =
        acc_buffer + (out_x_loop_start - out_x_buffer_start) * output_depth;
    const int in_x_origin =
        (out_x_loop_start * stride) - pad_width + dilation_factor * filter_x;
    const float* input_ptr = input_data + in_x_origin * input_depth;
    const int num_output_pixels = out_x_loop_end - out_x_loop_start;
    FloatDepthwiseConvKernel<kAllowStrided, kFixedInputDepth,
                             kFixedDepthMultiplier>::Run(num_output_pixels,
                                                         input_depth,
                                                         depth_multiplier,
                                                         input_ptr,
                                                         input_ptr_increment,
                                                         filter_base_ptr,
                                                         acc_buffer_ptr);
    // Advance to the filter taps of the next filter_x position.
    filter_base_ptr += output_depth;
  }
}
// Generic, portable fallback of FloatDepthwiseConvAccumRow: handles any
// stride, dilation, input depth and depth multiplier with plain scalar
// arithmetic. Used when no specialized kernel matches the op's parameters.
inline void FloatDepthwiseConvAccumRowGeneric(
    int stride, int dilation_factor, int input_depth, int input_width,
    const float* input_data, int pad_width, int depth_multiplier,
    int filter_width, const float* filter_data, int out_x_buffer_start,
    int out_x_buffer_end, int output_depth, float* acc_buffer) {
  ruy::profiler::ScopeLabel label("DepthwiseConvAccumRowGeneric (slow)");
  const float* filter_row = filter_data;
  for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
    // Clamp the output segment touched by this filter tap to both the
    // acc_buffer window and the range backed by valid (non-padding) input.
    const int unclamped_start =
        (pad_width - dilation_factor * filter_x + stride - 1) / stride;
    const int unclamped_end =
        (pad_width + input_width - dilation_factor * filter_x + stride - 1) /
        stride;
    const int out_x_begin = std::max(out_x_buffer_start, unclamped_start);
    const int out_x_end = std::min(out_x_buffer_end, unclamped_end);
    float* acc_ptr =
        acc_buffer + (out_x_begin - out_x_buffer_start) * output_depth;
    const int in_x_origin =
        (out_x_begin * stride) - pad_width + dilation_factor * filter_x;
    const float* in_ptr = input_data + in_x_origin * input_depth;
    // After consuming input_depth values, skip to the next strided pixel.
    const int in_skip = (stride - 1) * input_depth;
    for (int out_x = out_x_begin; out_x < out_x_end; ++out_x) {
      const float* f_ptr = filter_row;
      for (int ic = 0; ic < input_depth; ++ic) {
        const float in_val = *in_ptr++;
        for (int m = 0; m < depth_multiplier; ++m) {
          *acc_ptr++ += *f_ptr++ * in_val;
        }
      }
      in_ptr += in_skip;
    }
    filter_row += output_depth;
  }
}
// Initializes the accumulator buffer with bias values: each of the
// num_output_pixels rows of acc_buffer (output_depth floats wide) receives
// a copy of bias_data.
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
                                       const float* bias_data,
                                       float* acc_buffer) {
  // TODO(benoitjacob): This might need optimized specializations
  // for small output_depth values, if that ever becomes an important
  // case (like it was for some quantized DepthwiseConv cases).
  float* dst = acc_buffer;
  for (int i = 0; i < num_output_pixels; ++i, dst += output_depth) {
    memcpy(dst, bias_data, sizeof(dst[0]) * output_depth);
  }
}
// DepthwiseConv can run with multi threads on the dim specified by thread_dim.
// Each thread processes output elements on dim, thread_dim, in the range of
// [thread_start, thread_end).
// For example, assume thread_start = 2, thread_end = 6, and thread_dim = 1, it
// means that it will calculate DepthwiseConv for output_data[:, 2:5, :, :].
//
// The cpu_flags is currently unused. This
// parameter is included so that the signature matches that required by a
// templated function. Other versions, such as quantized, need this parameter.
inline void DepthwiseConvImpl(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data, const CpuFlags& /* cpu_flags */, int thread_start,
    int thread_end, int thread_dim) {
  ruy::profiler::ScopeLabel label("DepthwiseConv/float/DepthwiseConvImpl");
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  // Stack-allocated accumulator: holds kOutputPixelsInAccBuffer output
  // pixels, each output_depth floats wide. output_depth must fit within it.
  static const int kAccBufferMaxSize = 4832;
  float acc_buffer[kAccBufferMaxSize];
  TFLITE_DCHECK_GE(kAccBufferMaxSize, output_depth);
  const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
  TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
                   kAccBufferActualSize);
  TFLITE_DCHECK_LE(kAccBufferActualSize, kAccBufferMaxSize);
  TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
  // row_accum_func will point to the core accumulation function to be used
  // for this DepthwiseConv op.
  using row_accum_func_t = decltype(&FloatDepthwiseConvAccumRowGeneric);
  row_accum_func_t row_accum_func = nullptr;
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
                                        FIXED_DEPTH_MULTIPLIER)           \
  if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) &&          \
      (input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) &&     \
      depth_multiplier == FIXED_DEPTH_MULTIPLIER) {                       \
    row_accum_func =                                                      \
        FloatDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH,      \
                                   FIXED_DEPTH_MULTIPLIER>;               \
  }
#ifdef USE_NEON
  // We go over our list of kernels by decreasing order of preference
  // for the cases where multiple kernels could apply.
  // Start with the fastest kernels: AllowStrided=false, fixed input depth.
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
  // Next come the strided kernels: AllowStrided=true, fixed input depth.
  // They are a bit less efficient, but allow stride!=1.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 3, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 3, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
  // Finally, the kernels allowing a variable input depth,
  // these are the least efficient but most general kernels.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 16)
#endif  // USE_NEON
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
  // No matching fast kernel found, use slow fallback.
  if (!row_accum_func) {
    row_accum_func = FloatDepthwiseConvAccumRowGeneric;
  }
  const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
  const int input_batch_stride = input_height_stride * input_shape.Dims(1);
  const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
  // Now that we have determined row_accum_func, we can start work.
  // Resolve the [thread_start, thread_end) window into either a batch range
  // (thread_dim == 0) or an output-row range (thread_dim == 1).
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = output_height;
  int output_ptr_offset = 0;
  switch (thread_dim) {
    case 0:
      // Multithread along with the batch axis
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
      break;
    case 1:
      // Multithread along with the row axis
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, output_height);
      row_start = thread_start;
      row_end = thread_end;
      output_ptr_offset = row_start * output_width * output_depth;
      break;
  }
  float* output_ptr = output_data + output_ptr_offset;
  // Per batch entry, skip over the output rows that this thread does not own
  // (output_height - (row_end - row_start) rows).
  int batch_step =
      (output_height + row_start - row_end) * output_width * output_depth;
  for (int b = batch_start; b < batch_end; ++b) {
    for (int out_y = row_start; out_y < row_end; ++out_y) {
      const int in_y_origin = (out_y * stride_height) - pad_height;
      // Restrict the filter rows to those that land on valid input rows.
      const int filter_y_start =
          std::max(0, (-in_y_origin + dilation_height_factor - 1) /
                          dilation_height_factor);
      const int filter_y_end =
          std::min(filter_height,
                   (input_height - in_y_origin + dilation_height_factor - 1) /
                       dilation_height_factor);
      for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
           out_x_buffer_start += kOutputPixelsInAccBuffer) {
        const int out_x_buffer_end = std::min(
            output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
        // We call a 'pixel' a group of activation that share all but the
        // 'depth'/'channel' coordinate. num_output_pixels is the number of
        // output pixels that we will accumulate in this loop iteration.
        const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
        // Initialize our local accumulator with the bias values, so we don't
        // have to add them later.
        DepthwiseConvInitAccBuffer(num_output_pixels, output_depth, bias_data,
                                   acc_buffer);
        // Accumulation loop. Most of the time should be spent in here.
        for (int filter_y = filter_y_start; filter_y < filter_y_end;
             ++filter_y) {
          const int in_y = in_y_origin + dilation_height_factor * filter_y;
          row_accum_func(
              stride_width, dilation_width_factor, input_depth, input_width,
              input_data + in_y * input_height_stride + b * input_batch_stride,
              pad_width, depth_multiplier, filter_width,
              filter_data + filter_y * filter_height_stride, out_x_buffer_start,
              out_x_buffer_end, output_depth, acc_buffer);
        }
        // Finished accumulating. Now store to destination.
        // Clamp each value to the activation range on the way out.
        const int num_output_values = output_depth * num_output_pixels;
        int i = 0;
// TODO(benoitjacob) optimized code goes here
#ifdef USE_NEON
        // Handle 16 values at a time
        for (; i <= num_output_values - 16; i += 16) {
          float32x4_t acc[4];
          for (int k = 0; k < 4; k++) {
            acc[k] = vld1q_f32(acc_buffer + i + 4 * k);
          }
          for (int k = 0; k < 4; k++) {
            acc[k] = vmaxq_f32(
                vdupq_n_f32(output_activation_min),
                vminq_f32(vdupq_n_f32(output_activation_max), acc[k]));
          }
          for (int k = 0; k < 4; k++) {
            vst1q_f32(output_ptr + 4 * k, acc[k]);
          }
          output_ptr += 16;
        }
        // Handle 4 values at a time
        for (; i <= num_output_values - 4; i += 4) {
          float32x4_t acc = vld1q_f32(acc_buffer + i);
          acc = vmaxq_f32(vdupq_n_f32(output_activation_min),
                          vminq_f32(vdupq_n_f32(output_activation_max), acc));
          vst1q_f32(output_ptr, acc);
          output_ptr += 4;
        }
#endif
        // Handle leftover values, one by one. This is very slow.
        for (; i < num_output_values; i++) {
          float acc = acc_buffer[i];
          acc = std::max(output_activation_min,
                         std::min(output_activation_max, acc));
          *output_ptr++ = acc;
        }
      }
    }
    output_ptr += batch_step;
  }
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_FLOAT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h | C++ | apache-2.0 | 45,131 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_MULTITHREAD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_MULTITHREAD_H_
#include <algorithm>
#include <cstdint>
#include <limits>

#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_float.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h"
namespace tflite {
namespace optimized_ops {
// TODO(luwa): add multithread to per-channel depthwise_conv
// DepthwiseConv can run with multi threads on the dim specified by thread_dim.
// Each thread processes output elements on dim, thread_dim, in the range of
// [thread_start, thread_end).
// For example, assume thread_start = 2, thread_end = 6, and thread_dim = 1, it
// means that it will calculate DepthwiseConv for output_data[:, 2:5, :, :].
//
// Thread-pool task wrapper: captures (by reference/pointer) all arguments of
// one DepthwiseConvImpl slice and invokes it from Run(). The referenced
// shapes/buffers must outlive the task, which holds them for the duration of
// cpu_backend_threadpool::Execute.
// T is the activation/weight element type; TS is the bias element type.
template <typename T, typename TS>
struct DepthwiseConvWorkerTask : cpu_backend_threadpool::Task {
  DepthwiseConvWorkerTask(const DepthwiseParams& params,
                          const RuntimeShape& input_shape, const T* input_data,
                          const RuntimeShape& filter_shape,
                          const T* filter_data, const RuntimeShape& bias_shape,
                          const TS* bias_data, const RuntimeShape& output_shape,
                          T* output_data, const CpuFlags& cpu_flags,
                          int thread_start, int thread_end, int thread_dim)
      : params_(params),
        input_shape_(input_shape),
        input_data_(input_data),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_shape_(output_shape),
        output_data_(output_data),
        cpu_flags_(cpu_flags),
        thread_start_(thread_start),
        thread_end_(thread_end),
        thread_dim_(thread_dim) {}
  // Executes this task's slice of the DepthwiseConv on the calling thread.
  void Run() override {
    DepthwiseConvImpl(params_, input_shape_, input_data_, filter_shape_,
                      filter_data_, bias_shape_, bias_data_, output_shape_,
                      output_data_, cpu_flags_, thread_start_, thread_end_,
                      thread_dim_);
  }
 private:
  const DepthwiseParams& params_;
  const RuntimeShape& input_shape_;
  const T* input_data_;
  const RuntimeShape& filter_shape_;
  const T* filter_data_;
  const RuntimeShape& bias_shape_;
  const TS* bias_data_;
  const RuntimeShape& output_shape_;
  T* output_data_;
  const CpuFlags& cpu_flags_;
  int thread_start_;
  int thread_end_;
  int thread_dim_;
};
// Returns how many threads are worth spawning for this DepthwiseConv, based
// on the total number of scalar multiplications required. The result is
// always >= 1; the caller still clamps it to the context's max thread count.
inline int HowManyConvThreads(const RuntimeShape& output_shape,
                              const RuntimeShape& filter_shape) {
  // How many scalar multiplications are needed to make it worth using one
  // more thread.
  static constexpr int kMinMulPerThread = 1 << 13;  // 8k
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  // Accumulate the multiplication count in 64 bits: FlatSize() alone may be
  // close to INT_MAX, so multiplying by the filter area could overflow a
  // 32-bit int (signed overflow is UB).
  const std::int64_t num_muls = static_cast<std::int64_t>(
                                    output_shape.FlatSize()) *
                                filter_height * filter_width;
  // Try to avoid real runtime divisions if possible by dividing by a
  // compile-time constant.
  const std::int64_t thread_count =
      std::max<std::int64_t>(1, num_muls / kMinMulPerThread);
  // Clamp back into int range before returning.
  return static_cast<int>(std::min<std::int64_t>(
      thread_count, std::numeric_limits<int>::max()));
}
// Decides how to split DepthwiseConv work across threads: returns true to
// split along the batch dimension, false to split within batch entries
// (along output rows). Requires thread_count >= 2.
inline bool MultithreadAlongBatches(int thread_count, int batches) {
  TFLITE_DCHECK_GE(thread_count, 2);
  // With at least two batch entries per thread, batch-wise multithreading is
  // safe and attractive: the per-thread load is approximately balanced, and
  // each thread runs over large contiguous buffers with little
  // boundary-handling overhead.
  if (batches >= 2 * thread_count) {
    return true;
  }
  // Fewer batch entries than threads: batch-wise splitting would leave some
  // threads idle, so prefer intra-batch-entry multithreading.
  if (batches < thread_count) {
    return false;
  }
  // In between (at least 1 but fewer than 2 entries per thread): only go
  // batch-wise when the entries divide evenly among the threads, so every
  // thread gets the same amount of work.
  return (batches % thread_count) == 0;
}
// Entry point for multithreaded DepthwiseConv: picks a thread count from the
// workload size and the backend context, chooses the split dimension (batch
// vs. output rows), and dispatches DepthwiseConvWorkerTask slices to the
// thread pool. Falls back to a single direct DepthwiseConvImpl call when one
// thread suffices. T is the activation/weight type, TS the bias type.
template <typename T, typename TS>
inline void DepthwiseConv(const DepthwiseParams& params,
                          const RuntimeShape& input_shape, const T* input_data,
                          const RuntimeShape& filter_shape,
                          const T* filter_data, const RuntimeShape& bias_shape,
                          const TS* bias_data, const RuntimeShape& output_shape,
                          T* output_data,
                          CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("DepthwiseConv");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  int thread_count = HowManyConvThreads(output_shape, filter_shape);
  const int max_threads = cpu_backend_context->max_num_threads();
  thread_count = std::max(1, std::min(thread_count, max_threads));
#ifndef TFLITE_WITH_RUY
  // Cap the number of threads to 2 for float path to avoid regression in
  // performance (b/132294857).
  if (std::is_floating_point<T>::value) {
    thread_count = std::min(thread_count, 2);
  }
#endif
  const int output_batches = output_shape.Dims(0);
  const int output_height = output_shape.Dims(1);
  CpuFlags cpu_flags;
  GetCpuFlags(&cpu_flags);
  if (thread_count == 1) {
    // Single-threaded: run the whole op over all output rows directly.
    DepthwiseConvImpl(params, input_shape, input_data, filter_shape,
                      filter_data, bias_shape, bias_data, output_shape,
                      output_data, cpu_flags, /*thread_start=*/0,
                      /*thread_end=*/output_height, /*thread_dim=*/1);
    return;
  }
  // thread_dim 0 splits along batches, thread_dim 1 along output rows.
  int thread_dim, thread_dim_size;
  if (MultithreadAlongBatches(thread_count, output_batches)) {
    thread_dim = 0;
    thread_dim_size = output_batches;
  } else {
    thread_dim = 1;
    thread_dim_size = output_height;
  }
  std::vector<DepthwiseConvWorkerTask<T, TS>> tasks;
  // TODO(b/131746020) don't create new heap allocations every time.
  // At least we make it a single heap allocation by using reserve().
  tasks.reserve(thread_count);
  int thread_start = 0;
  for (int i = 0; i < thread_count; ++i) {
    // Divide the remaining range evenly among the remaining threads.
    int thread_end =
        thread_start + (thread_dim_size - thread_start) / (thread_count - i);
    tasks.emplace_back(params, input_shape, input_data, filter_shape,
                       filter_data, bias_shape, bias_data, output_shape,
                       output_data, cpu_flags, thread_start, thread_end,
                       thread_dim);
    thread_start = thread_end;
  }
  // Blocks until all tasks have completed.
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_MULTITHREAD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_multithread.h | C++ | apache-2.0 | 7,989 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_H_
#include <type_traits>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
#ifdef __AVX2__
#include <x86intrin.h>
#endif
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
// Implementation of quantized DepthwiseConv
// Primary template: intentionally empty. Only the explicit specializations
// below provide a Run() method; dispatch code must never instantiate an
// unspecialized combination.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
struct QuantizedDepthwiseConvKernel {};
#ifdef USE_NEON
// Specialization: strided access allowed, input depth fixed at 8, depth
// multiplier 2 — 16 output channels per pixel. Inputs and filters are
// widened from uint8 to int16 (with their zero-point offsets added) so the
// products accumulate into 32-bit lanes via vmlal.
template <>
struct QuantizedDepthwiseConvKernel<true, 8, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8x2_t filter_u8;
    filter_u8.val[0] = vld1_u8(filter_ptr);
    filter_u8.val[1] = vld1_u8(filter_ptr + 8);
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vaddq_s16(vreinterpretq_s16_u16(vmovl_u8(filter_u8.val[i])),
                            vdupq_n_s16(filter_offset));
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      // Layout note: acc[i].val[0] covers acc_buffer[4*i .. 4*i+3] and
      // acc[i].val[1] covers acc_buffer[4*i+8 .. 4*i+11]; the interleaved
      // offsets pair up with the vzipq duplication below.
      int32x4x2_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i);
        acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
      }
      // Load the inputs, add input_offset.
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += input_ptr_increment;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold: each input channel must feed
      // both of its depth-multiplier outputs.
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[0].val[i] = vmlal_s16(acc[0].val[i], vget_low_s16(filter[i]),
                                  vget_low_s16(input_dup2.val[i]));
        acc[1].val[i] = vmlal_s16(acc[1].val[i], vget_high_s16(filter[i]),
                                  vget_high_s16(input_dup2.val[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
        vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: non-strided, input_depth == 8, depth_multiplier == 1
// (8 output channels per pixel). input_ptr_increment is unused: the
// non-strided contract means input pixels are densely packed, so the
// pointer advances by the fixed depth (8) per pixel.
template <>
struct QuantizedDepthwiseConvKernel<false, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. Done once, outside the loops.
    const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
    const int16x8_t filter = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      uint8x8_t input_u8[2];
      for (int i = 0; i < 2; i++) {
        input_u8[i] = vld1_u8(input_ptr + 8 * i);
      }
      input_ptr += 16;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vreinterpretq_s16_u16(vmovl_u8(input_u8[i]));
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Multiply-accumulate: same filter applied to both pixels.
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), vget_low_s16(input[0]));
      acc[1] =
          vmlal_s16(acc[1], vget_high_s16(filter), vget_high_s16(input[0]));
      acc[2] = vmlal_s16(acc[2], vget_low_s16(filter), vget_low_s16(input[1]));
      acc[3] =
          vmlal_s16(acc[3], vget_high_s16(filter), vget_high_s16(input[1]));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle 1 output pixel at a time (tail of the 2-at-a-time loop).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[2];
      acc[0] = vld1q_s32(acc_buffer_ptr);
      acc[1] = vld1q_s32(acc_buffer_ptr + 4);
      // Load the inputs, add input_offset.
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), vget_low_s16(input));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(filter), vget_high_s16(input));
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc[0]);
      vst1q_s32(acc_buffer_ptr + 4, acc[1]);
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: non-strided, input_depth == 4, depth_multiplier == 2
// (8 output channels per pixel). input_ptr_increment is unused; the input
// pointer advances by the fixed depth (4) per output pixel.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. All 8 filter values fit in one
    // int16x8 register, loaded once outside the loops.
    const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
    const int16x8_t filter = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. One 8-byte load covers both
      // pixels (2 pixels x 4 channels).
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold (each input channel feeds two
      // consecutive output channels).
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] = vmlal_s16(acc[2 * i + 0], vget_low_s16(filter),
                                   vget_low_s16(input_dup2.val[i]));
        acc[2 * i + 1] = vmlal_s16(acc[2 * i + 1], vget_high_s16(filter),
                                   vget_high_s16(input_dup2.val[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. Only 4 bytes remain for this
      // pixel, so insert them lane-by-lane rather than doing an 8-byte
      // vld1_u8 that could read past the buffer.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      const int16x4x2_t input_dup2 = vzip_s16(input, input);
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), input_dup2.val[0]);
      acc[1] = vmlal_s16(acc[1], vget_high_s16(filter), input_dup2.val[1]);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: non-strided, input_depth == 2, depth_multiplier == 8
// (16 output channels per pixel). Uses vmlal_lane_s16 to broadcast each
// input channel against its 8 filter taps; input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 8> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. 16 filter values -> two int16x8
    // registers, loaded once outside the loops.
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      const uint8x8_t filter_u8 = vld1_u8(filter_ptr + 8 * i);
      const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
      filter[i] = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
    }
    int outp = 0;
    // Handle two output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[8];
      for (int i = 0; i < 8; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 4 bytes = 2 pixels x 2 channels;
      // lane inserts avoid reading past the end of the input buffer.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate. Lanes 0/1 are pixel 0's two channels,
      // lanes 2/3 are pixel 1's; each channel is broadcast against its
      // 8 filter taps via vmlal_lane_s16.
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 0);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 1);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 1);
      acc[4] = vmlal_lane_s16(acc[4], vget_low_s16(filter[0]), input, 2);
      acc[5] = vmlal_lane_s16(acc[5], vget_high_s16(filter[0]), input, 2);
      acc[6] = vmlal_lane_s16(acc[6], vget_low_s16(filter[1]), input, 3);
      acc[7] = vmlal_lane_s16(acc[7], vget_high_s16(filter[1]), input, 3);
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 8; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 32;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 0);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 1);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 1);
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: non-strided, input_depth == 2, depth_multiplier == 2
// (4 output channels per pixel). input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. Only 4 filter values; insert
    // them lane-by-lane to avoid over-reading the filter buffer.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[2], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[3], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. One 8-byte load covers 4 pixels
      // (4 pixels x 2 channels).
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold (depth_multiplier == 2).
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input_dup2.val[0]));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input_dup2.val[0]));
      acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input_dup2.val[1]));
      acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input_dup2.val[1]));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the 2 input channels lane-by-lane; avoids over-read.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      const int16x4_t input_dup2 = vzip_s16(input, input).val[0];
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input_dup2);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
  }
};
// Specialization: non-strided, input_depth == 2, depth_multiplier == 1
// (2 output channels per pixel). input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    // The 2 filter values are deliberately replicated into lanes 2/3 so
    // that one int16x4 register covers two consecutive pixels at once in
    // the multi-pixel loops below. This duplication is intentional.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 8 output pixels at a time.
    for (; outp <= num_output_pixels - 8; outp += 8) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 16 bytes = 8 pixels x 2 channels.
      uint8x8_t input_u8[2];
      for (int i = 0; i < 2; i++) {
        input_u8[i] = vld1_u8(input_ptr + 8 * i);
      }
      input_ptr += 16;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vreinterpretq_s16_u16(vmovl_u8(input_u8[i]));
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Multiply-accumulate. The duplicated filter register pairs with two
      // pixels' worth of input per vmlal_s16.
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input[0]));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input[0]));
      acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input[1]));
      acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input[1]));
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8))
;
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input));
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset. Lane inserts avoid over-read.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer.
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle 1 output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x2_t acc = vld1_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate. Only the low 2 lanes of the 128-bit result are
      // meaningful; the high half is discarded by vget_low_s32.
      acc = vget_low_s32(vmlal_s16(vcombine_s32(acc, acc), filter, input));
      // Store the accumulators back to acc_buffer.
      vst1_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
// Specialization: non-strided, input_depth == 1, depth_multiplier == 2
// (2 output channels per pixel). The 2-tap filter is replicated into
// lanes 2/3 so one int16x4 register covers two pixels; intentional.
// input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 1, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 8 output pixels at a time.
    for (; outp <= num_output_pixels - 8; outp += 8) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 8 bytes = 8 pixels x 1 channel.
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold (depth_multiplier == 2).
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input_dup2.val[0]));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input_dup2.val[0]));
      acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input_dup2.val[1]));
      acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input_dup2.val[1]));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x2_t acc = vld1_s32(acc_buffer_ptr);
      // Load the single input value, add input_offset (scalar path).
      const uint32 input = *input_ptr++ + input_offset;
      // Multiply-accumulate: broadcast the scalar input against the filter;
      // only the low 2 lanes of the result are kept.
      acc = vget_low_s32(vmlal_n_s16(vcombine_s32(acc, acc), filter, input));
      // Store the accumulators back to acc_buffer
      vst1_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
// Specialization: non-strided, input_depth == 1, depth_multiplier == 4
// (4 output channels per pixel). Each scalar input is broadcast against the
// 4-tap filter via vmlal_lane_s16 / vmlal_n_s16. input_ptr_increment is
// unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 1, 4> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. Lane inserts avoid over-reading
    // the 4-byte filter buffer.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[2], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[3], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 8 output pixels at a time.
    for (; outp <= num_output_pixels - 8; outp += 8) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[8];
      for (int i = 0; i < 8; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 8 bytes = 8 pixels x 1 channel.
      uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate: one accumulator register per pixel, each fed
      // by one input lane broadcast against the 4 filter taps.
      acc[0] = vmlal_lane_s16(acc[0], filter, vget_low_s16(input), 0);
      acc[1] = vmlal_lane_s16(acc[1], filter, vget_low_s16(input), 1);
      acc[2] = vmlal_lane_s16(acc[2], filter, vget_low_s16(input), 2);
      acc[3] = vmlal_lane_s16(acc[3], filter, vget_low_s16(input), 3);
      acc[4] = vmlal_lane_s16(acc[4], filter, vget_high_s16(input), 0);
      acc[5] = vmlal_lane_s16(acc[5], filter, vget_high_s16(input), 1);
      acc[6] = vmlal_lane_s16(acc[6], filter, vget_high_s16(input), 2);
      acc[7] = vmlal_lane_s16(acc[7], filter, vget_high_s16(input), 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 8; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 32;
    }
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. Lane inserts avoid over-read.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc[0] = vmlal_lane_s16(acc[0], filter, input, 0);
      acc[1] = vmlal_lane_s16(acc[1], filter, input, 1);
      acc[2] = vmlal_lane_s16(acc[2], filter, input, 2);
      acc[3] = vmlal_lane_s16(acc[3], filter, input, 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time (tail, scalar input load).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the single input value, add input_offset.
      const uint32 input = *input_ptr++ + input_offset;
      // Multiply-accumulate
      acc = vmlal_n_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
  }
};
// Specialization: non-strided, input_depth == 4, depth_multiplier == 1
// (4 output channels per pixel). input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. Lane inserts avoid over-reading
    // the 4-byte filter buffer.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[2], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[3], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 16 bytes = 4 pixels x 4 channels.
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        const uint8x8_t input_u8 = vld1_u8(input_ptr + 8 * i);
        const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
        input[i] = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      }
      input_ptr += 16;
      // Multiply-accumulate: the same 4-tap filter applies to every pixel.
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] =
            vmlal_s16(acc[2 * i + 0], filter, vget_low_s16(input[i]));
        acc[2 * i + 1] =
            vmlal_s16(acc[2 * i + 1], filter, vget_high_s16(input[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc;
      acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset. Lane inserts avoid over-read.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
  }
};
// Specialization: non-strided, input_depth == 4, depth_multiplier == 4
// (16 output channels per pixel). Each input channel is broadcast against
// its 4 filter taps via vmlal_lane_s16. input_ptr_increment is unused.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 4> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset. 16 filter values -> two int16x8
    // registers, loaded once outside the loops.
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      const uint8x8_t filter_u8 = vld1_u8(filter_ptr + 8 * i);
      const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
      filter[i] = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
    }
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[8];
      for (int i = 0; i < 8; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 8 bytes = 2 pixels x 4 channels.
      uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate. acc[0..3] is pixel 0 (input lanes 0-3),
      // acc[4..7] is pixel 1 (input lanes 4-7); each accumulator pairs one
      // broadcast input channel with its 4 filter taps.
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]),
                              vget_low_s16(input), 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]),
                              vget_low_s16(input), 1);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]),
                              vget_low_s16(input), 2);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]),
                              vget_low_s16(input), 3);
      acc[4] = vmlal_lane_s16(acc[4], vget_low_s16(filter[0]),
                              vget_high_s16(input), 0);
      acc[5] = vmlal_lane_s16(acc[5], vget_high_s16(filter[0]),
                              vget_high_s16(input), 1);
      acc[6] = vmlal_lane_s16(acc[6], vget_low_s16(filter[1]),
                              vget_high_s16(input), 2);
      acc[7] = vmlal_lane_s16(acc[7], vget_high_s16(filter[1]),
                              vget_high_s16(input), 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 8; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 32;
    }
    // Handle one output pixel at a time (tail).
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. Lane inserts avoid over-read.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
      input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 1);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 2);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: strided allowed, runtime-variable input_depth
// (kFixedInputDepth == 0), depth_multiplier == 3. Uses VTBL table lookups
// to triplicate input bytes so each input channel lines up with its 3
// filter taps; a scalar loop handles the sub-8 channel remainder.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 3> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // We will have to duplicate bytes in a NEON register, 3-fold.
    // We will do that by register-level table-look-up using VTBL instructions.
    // Here we prepare the registers containing the table-lookup indices.
    // The three 8-entry tables map 8 source bytes to 24 output bytes:
    // 0,0,0,1,1,1,2,2 | 2,3,3,3,4,4,4,5 | 5,5,6,6,6,7,7,7.
    static const uint8 dup3_indices_array[3][8] = {{0, 0, 0, 1, 1, 1, 2, 2},
                                                   {2, 3, 3, 3, 4, 4, 4, 5},
                                                   {5, 5, 6, 6, 6, 7, 7, 7}};
    uint8x8_t dup3_indices[3];
    for (int i = 0; i < 3; i++) {
      dup3_indices[i] = vld1_u8(dup3_indices_array[i]);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const uint8* local_filter_ptr = filter_ptr;
      const uint8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 8 input channels at a time (24 output channels).
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters, add filter_offset.
        int16x8_t filter[3];
        uint8x8x3_t filter_u8;
        filter_u8.val[0] = vld1_u8(local_filter_ptr);
        filter_u8.val[1] = vld1_u8(local_filter_ptr + 8);
        filter_u8.val[2] = vld1_u8(local_filter_ptr + 16);
        local_filter_ptr += 24;
        for (int i = 0; i < 3; i++) {
          const int16x8_t filter_s16 =
              vreinterpretq_s16_u16(vmovl_u8(filter_u8.val[i]));
          filter[i] = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
        }
        // Load the inputs, duplicate 3-fold, add input_offset.
        const uint8x8_t input_u8 = vld1_u8(local_input_ptr);
        local_input_ptr += 8;
        uint8x8_t input_u8_dup3[3];
        for (int i = 0; i < 3; i++) {
          input_u8_dup3[i] = vtbl1_u8(input_u8, dup3_indices[i]);
        }
        int16x8_t input_dup3[3];
        for (int i = 0; i < 3; i++) {
          const int16x8_t input_s16_dup3 =
              vreinterpretq_s16_u16(vmovl_u8(input_u8_dup3[i]));
          input_dup3[i] = vaddq_s16(input_s16_dup3, vdupq_n_s16(input_offset));
        }
        // Load the accumulators from acc_buffer.
        // Interleaved layout: acc[i].val[j] maps to offset 4 * i + 8 * j,
        // mirrored exactly by the stores below.
        int32x4x3_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i);
          acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
          acc[i].val[2] = vld1q_s32(acc_buffer_ptr + 4 * i + 16);
        }
        // Multiply-accumulate
        for (int j = 0; j < 3; j++) {
          acc[0].val[j] = vmlal_s16(acc[0].val[j], vget_low_s16(input_dup3[j]),
                                    vget_low_s16(filter[j]));
          acc[1].val[j] = vmlal_s16(acc[1].val[j], vget_high_s16(input_dup3[j]),
                                    vget_high_s16(filter[j]));
        }
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 16, acc[i].val[2]);
        }
        acc_buffer_ptr += 24;
      }
      // Handle one input channel at a time (scalar remainder).
      for (; ic < input_depth; ic++) {
        const int16 input_val = *local_input_ptr++ + input_offset;
        for (int i = 0; i < 3; i++) {
          const int16 filter_val = local_filter_ptr[i] + filter_offset;
          *acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
        }
        local_filter_ptr += 3;
      }
      // Strided kernel: advance to the next input pixel by the caller-
      // provided increment.
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided allowed, runtime-variable input_depth
// (kFixedInputDepth == 0), depth_multiplier == 2. Vectorizes 8 input
// channels (16 output channels) per iteration via vzipq_s16 duplication;
// a scalar loop handles the sub-8 channel remainder.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const uint8* local_filter_ptr = filter_ptr;
      const uint8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 8 input channels at a time (16 output channels).
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters, add filter_offset.
        int16x8_t filter[2];
        uint8x8x2_t filter_u8;
        filter_u8.val[0] = vld1_u8(local_filter_ptr);
        filter_u8.val[1] = vld1_u8(local_filter_ptr + 8);
        local_filter_ptr += 16;
        for (int i = 0; i < 2; i++) {
          const int16x8_t filter_s16 =
              vreinterpretq_s16_u16(vmovl_u8(filter_u8.val[i]));
          filter[i] = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
        }
        // Load the inputs, add input_offset, duplicate 2-fold.
        const uint8x8_t input_u8 = vld1_u8(local_input_ptr);
        local_input_ptr += 8;
        const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
        const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
        const int16x8x2_t input_dup2 = vzipq_s16(input, input);
        // Load the accumulators from acc_buffer.
        // Interleaved layout: acc[i].val[j] maps to offset 4 * i + 8 * j,
        // mirrored exactly by the stores below.
        int32x4x2_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i)
;
          acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
        }
        // Multiply-accumulate.
        for (int j = 0; j < 2; j++) {
          acc[0].val[j] = vmlal_s16(acc[0].val[j], vget_low_s16(filter[j]),
                                    vget_low_s16(input_dup2.val[j]));
          acc[1].val[j] = vmlal_s16(acc[1].val[j], vget_high_s16(filter[j]),
                                    vget_high_s16(input_dup2.val[j]));
        }
        // Store the accumulators back to acc_buffer.
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
        }
        acc_buffer_ptr += 16;
      }
      // Handle one input channel at a time (scalar remainder).
      for (; ic < input_depth; ic++) {
        // Load the inputs.
        const int16 input_val = *local_input_ptr++ + input_offset;
        for (int i = 0; i < 2; i++) {
          const int16 filter_val = local_filter_ptr[i] + filter_offset;
          *acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
        }
        local_filter_ptr += 2;
      }
      // Strided kernel: advance to the next input pixel by the caller-
      // provided increment.
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided access allowed, runtime-variable input depth,
// depth multiplier 1. Because input_depth is not a compile-time constant,
// the filter is re-walked for every output pixel; channels are consumed in
// groups of 16, then 8, then singly. The 16-channel body uses AVX2 when
// available and NEON otherwise.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const uint8* local_filter_ptr = filter_ptr;
      const uint8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 16 input channels at a time.
      for (; ic <= input_depth - 16; ic += 16) {
#ifdef __AVX2__
        // Load the filters, widen u8 -> s32, add filter_offset.
        __m128i filter_u8_0 = _mm_loadl_epi64(
            reinterpret_cast<const __m128i*>(local_filter_ptr + 8 * 0));
        __m128i filter_u8_1 = _mm_loadl_epi64(
            reinterpret_cast<const __m128i*>(local_filter_ptr + 8 * 1));
        local_filter_ptr += 16;
        __m256i filter_0 = _mm256_cvtepu8_epi32(filter_u8_0);
        __m256i filter_1 = _mm256_cvtepu8_epi32(filter_u8_1);
        __m256i filter_offset_vec = _mm256_set1_epi32(filter_offset);
        filter_0 = _mm256_add_epi32(filter_0, filter_offset_vec);
        filter_1 = _mm256_add_epi32(filter_1, filter_offset_vec);
        // Load the inputs, widen u8 -> s32, add input_offset.
        __m128i input_u8_0 = _mm_loadl_epi64(
            reinterpret_cast<const __m128i*>(local_input_ptr + 8 * 0));
        __m128i input_u8_1 = _mm_loadl_epi64(
            reinterpret_cast<const __m128i*>(local_input_ptr + 8 * 1));
        local_input_ptr += 16;
        __m256i input_0 = _mm256_cvtepu8_epi32(input_u8_0);
        __m256i input_1 = _mm256_cvtepu8_epi32(input_u8_1);
        __m256i input_offset_vec = _mm256_set1_epi32(input_offset);
        input_0 = _mm256_add_epi32(input_0, input_offset_vec);
        input_1 = _mm256_add_epi32(input_1, input_offset_vec);
        // Load the accumulators from acc_buffer
        __m256i acc_0 = _mm256_loadu_si256(
            reinterpret_cast<const __m256i*>(acc_buffer_ptr + 8 * 0));
        __m256i acc_1 = _mm256_loadu_si256(
            reinterpret_cast<const __m256i*>(acc_buffer_ptr + 8 * 1));
        // Multiply-accumulate in 32 bits.
        acc_0 = _mm256_add_epi32(acc_0, _mm256_mullo_epi32(input_0, filter_0));
        acc_1 = _mm256_add_epi32(acc_1, _mm256_mullo_epi32(input_1, filter_1));
        // Store the accumulators back to acc_buffer
        _mm256_storeu_si256(reinterpret_cast<__m256i*>(acc_buffer_ptr + 8 * 0),
                            acc_0);
        _mm256_storeu_si256(reinterpret_cast<__m256i*>(acc_buffer_ptr + 8 * 1),
                            acc_1);
        acc_buffer_ptr += 16;
#else
        // NEON path: widen u8 -> s16, add offsets, then widening
        // multiply-accumulate (s16 x s16 -> s32).
        // Load the filters, add filter_offset.
        uint8x8_t filter_u8_0 = vld1_u8(local_filter_ptr + 8 * 0);
        uint8x8_t filter_u8_1 = vld1_u8(local_filter_ptr + 8 * 1);
        local_filter_ptr += 16;
        int16x8_t filter_0 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_0));
        int16x8_t filter_1 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_1));
        filter_0 = vaddq_s16(filter_0, vdupq_n_s16(filter_offset));
        filter_1 = vaddq_s16(filter_1, vdupq_n_s16(filter_offset));
        // Load the inputs, add input_offset.
        uint8x8_t input_u8_0 = vld1_u8(local_input_ptr + 8 * 0);
        uint8x8_t input_u8_1 = vld1_u8(local_input_ptr + 8 * 1);
        local_input_ptr += 16;
        int16x8_t input_0 = vreinterpretq_s16_u16(vmovl_u8(input_u8_0));
        int16x8_t input_1 = vreinterpretq_s16_u16(vmovl_u8(input_u8_1));
        input_0 = vaddq_s16(input_0, vdupq_n_s16(input_offset));
        input_1 = vaddq_s16(input_1, vdupq_n_s16(input_offset));
        // Load the accumulators from acc_buffer
        int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
        int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
        int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
        int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
        acc_0 = vmlal_s16(acc_0, vget_low_s16(input_0), vget_low_s16(filter_0));
        acc_1 =
            vmlal_s16(acc_1, vget_high_s16(input_0), vget_high_s16(filter_0));
        acc_2 = vmlal_s16(acc_2, vget_low_s16(input_1), vget_low_s16(filter_1));
        acc_3 =
            vmlal_s16(acc_3, vget_high_s16(input_1), vget_high_s16(filter_1));
        // Store the accumulators back to acc_buffer
        vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
        vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
        vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
        vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
        acc_buffer_ptr += 16;
#endif
      }
      // Handle 8 input channels at a time.
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters, add filter_offset.
        const uint8x8_t filter_u8 = vld1_u8(local_filter_ptr);
        local_filter_ptr += 8;
        const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
        const int16x8_t filter =
            vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
        // Load the inputs, add input_offset.
        const uint8x8_t input_u8 = vld1_u8(local_input_ptr);
        local_input_ptr += 8;
        const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
        const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
        // Load the accumulators from acc_buffer
        int32x4_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate
        acc[0] = vmlal_s16(acc[0], vget_low_s16(input), vget_low_s16(filter));
        acc[1] = vmlal_s16(acc[1], vget_high_s16(input), vget_high_s16(filter));
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 8;
      }
      // Handle one input channel at a time (scalar tail).
      for (; ic < input_depth; ic++) {
        const int16 input_val = *local_input_ptr++ + input_offset;
        const int16 filter_val = *local_filter_ptr++ + filter_offset;
        *acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 16, depth
// multiplier 1. The 16 filter taps are loaded and offset once, outside the
// per-pixel loop; each pixel then performs 16 widening multiply-accumulates.
template <>
struct QuantizedDepthwiseConvKernel<true, 16, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8[2];
    for (int i = 0; i < 2; i++) {
      filter_u8[i] = vld1_u8(filter_ptr + 8 * i);
    }
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vreinterpretq_s16_u16(vmovl_u8(filter_u8[i]));
    }
    for (int i = 0; i < 2; i++) {
      filter[i] = vaddq_s16(filter[i], vdupq_n_s16(filter_offset));
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      uint8x8_t input_u8[2];
      for (int i = 0; i < 2; i++) {
        input_u8[i] = vld1_u8(input_ptr + 8 * i);
      }
      input_ptr += input_ptr_increment;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vreinterpretq_s16_u16(vmovl_u8(input_u8[i]));
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate (s16 x s16 -> s32 widening).
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] = vmlal_s16(acc[2 * i + 0], vget_low_s16(input[i]),
                                   vget_low_s16(filter[i]));
        acc[2 * i + 1] = vmlal_s16(acc[2 * i + 1], vget_high_s16(input[i]),
                                   vget_high_s16(filter[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 8, depth
// multiplier 1. The 8 filter taps are preloaded once; each output pixel
// contributes 8 widening multiply-accumulates into acc_buffer.
template <>
struct QuantizedDepthwiseConvKernel<true, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    const int16x8_t filter_s16 = vreinterpretq_s16_u16(vmovl_u8(filter_u8));
    const int16x8_t filter = vaddq_s16(filter_s16, vdupq_n_s16(filter_offset));
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      const uint8x8_t input_u8 = vld1_u8(input_ptr);
      const int16x8_t input_s16 = vreinterpretq_s16_u16(vmovl_u8(input_u8));
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], vget_low_s16(input), vget_low_s16(filter));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(input), vget_high_s16(filter));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
      input_ptr += input_ptr_increment;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 1, depth
// multiplier 16. A single scalar input value is broadcast against 16
// preloaded filter taps per output pixel (vmlal_n_s16 broadcast MLA).
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 16> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8[2];
    for (int i = 0; i < 2; i++) {
      filter_u8[i] = vld1_u8(filter_ptr + 8 * i);
    }
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vreinterpretq_s16_u16(vmovl_u8(filter_u8[i]));
    }
    for (int i = 0; i < 2; i++) {
      filter[i] = vaddq_s16(filter[i], vdupq_n_s16(filter_offset));
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Single-channel input: read one byte, apply input_offset.
      uint8 input_u8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_u8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate: broadcast the scalar input across each lane.
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] =
            vmlal_n_s16(acc[2 * i + 0], vget_low_s16(filter[i]), input);
        acc[2 * i + 1] =
            vmlal_n_s16(acc[2 * i + 1], vget_high_s16(filter[i]), input);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 1, depth
// multiplier 32. One scalar input is broadcast against 32 preloaded filter
// taps per output pixel, accumulating into eight s32x4 vectors.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 32> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8_0 = vld1_u8(filter_ptr + 8 * 0);
    uint8x8_t filter_u8_1 = vld1_u8(filter_ptr + 8 * 1);
    uint8x8_t filter_u8_2 = vld1_u8(filter_ptr + 8 * 2);
    uint8x8_t filter_u8_3 = vld1_u8(filter_ptr + 8 * 3);
    int16x8_t filter_0 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_0));
    int16x8_t filter_1 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_1));
    int16x8_t filter_2 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_2));
    int16x8_t filter_3 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_3));
    filter_0 = vaddq_s16(filter_0, vdupq_n_s16(filter_offset));
    filter_1 = vaddq_s16(filter_1, vdupq_n_s16(filter_offset));
    filter_2 = vaddq_s16(filter_2, vdupq_n_s16(filter_offset));
    filter_3 = vaddq_s16(filter_3, vdupq_n_s16(filter_offset));
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Single-channel input: read one byte, apply input_offset.
      uint8 input_u8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_u8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
      int32x4_t acc_4 = vld1q_s32(acc_buffer_ptr + 4 * 4);
      int32x4_t acc_5 = vld1q_s32(acc_buffer_ptr + 4 * 5);
      int32x4_t acc_6 = vld1q_s32(acc_buffer_ptr + 4 * 6);
      int32x4_t acc_7 = vld1q_s32(acc_buffer_ptr + 4 * 7);
      // Multiply-accumulate: broadcast the scalar input across all taps.
      acc_0 = vmlal_n_s16(acc_0, vget_low_s16(filter_0), input);
      acc_1 = vmlal_n_s16(acc_1, vget_high_s16(filter_0), input);
      acc_2 = vmlal_n_s16(acc_2, vget_low_s16(filter_1), input);
      acc_3 = vmlal_n_s16(acc_3, vget_high_s16(filter_1), input);
      acc_4 = vmlal_n_s16(acc_4, vget_low_s16(filter_2), input);
      acc_5 = vmlal_n_s16(acc_5, vget_high_s16(filter_2), input);
      acc_6 = vmlal_n_s16(acc_6, vget_low_s16(filter_3), input);
      acc_7 = vmlal_n_s16(acc_7, vget_high_s16(filter_3), input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_s32(acc_buffer_ptr + 4 * 4, acc_4);
      vst1q_s32(acc_buffer_ptr + 4 * 5, acc_5);
      vst1q_s32(acc_buffer_ptr + 4 * 6, acc_6);
      vst1q_s32(acc_buffer_ptr + 4 * 7, acc_7);
      acc_buffer_ptr += 32;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 1, depth
// multiplier 20. One scalar input is broadcast against 20 preloaded filter
// taps per output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 20> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    // NEON wants to load 8 bytes at a time, but 20 is not divisible by 8.
    // We load the first 16 bytes into filter_u8_{0,1} as usual.
    // Then we load the 8 last bytes into filter_u8_x (x for 'extra').
    // This is redundant: the first 4 bytes of filter_u8_x are the same
    // as the last 4 bytes of filter_u8_1. Only the high half of filter_x
    // (taps 16..19) is used in the MLA below.
    uint8x8_t filter_u8_0 = vld1_u8(filter_ptr + 8 * 0);
    uint8x8_t filter_u8_1 = vld1_u8(filter_ptr + 8 * 1);
    uint8x8_t filter_u8_x = vld1_u8(filter_ptr + 8 * 1 + 4);
    int16x8_t filter_0 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_0));
    int16x8_t filter_1 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_1));
    int16x8_t filter_x = vreinterpretq_s16_u16(vmovl_u8(filter_u8_x));
    filter_0 = vaddq_s16(filter_0, vdupq_n_s16(filter_offset));
    filter_1 = vaddq_s16(filter_1, vdupq_n_s16(filter_offset));
    filter_x = vaddq_s16(filter_x, vdupq_n_s16(filter_offset));
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Single-channel input: read one byte, apply input_offset.
      uint8 input_u8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_u8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
      int32x4_t acc_4 = vld1q_s32(acc_buffer_ptr + 4 * 4);
      // Multiply-accumulate: broadcast the scalar input across all taps.
      acc_0 = vmlal_n_s16(acc_0, vget_low_s16(filter_0), input);
      acc_1 = vmlal_n_s16(acc_1, vget_high_s16(filter_0), input);
      acc_2 = vmlal_n_s16(acc_2, vget_low_s16(filter_1), input);
      acc_3 = vmlal_n_s16(acc_3, vget_high_s16(filter_1), input);
      acc_4 = vmlal_n_s16(acc_4, vget_high_s16(filter_x), input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_s32(acc_buffer_ptr + 4 * 4, acc_4);
      acc_buffer_ptr += 20;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 1, depth
// multiplier 8. One scalar input is broadcast against 8 preloaded filter
// taps per output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 8> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    const uint8x8_t filter_u8 = vld1_u8(filter_ptr);
    const int16x8_t filter = vaddq_s16(
        vreinterpretq_s16_u16(vmovl_u8(filter_u8)), vdupq_n_s16(filter_offset));
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Single-channel input: read one byte, apply input_offset.
      uint8 input_u8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_u8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate: broadcast the scalar input across both halves.
      acc[0] = vmlal_n_s16(acc[0], vget_low_s16(filter), input);
      acc[1] = vmlal_n_s16(acc[1], vget_high_s16(filter), input);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 2, depth
// multiplier 1. The two filter taps are duplicated into a 4-lane vector
// [f0, f1, f0, f1] so that two output pixels can be processed per MLA.
template <>
struct QuantizedDepthwiseConvKernel<true, 2, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      // Each 16-bit load grabs one pixel's pair of adjacent channels in one
      // access (byte order within the u16 is undone by the u8 reinterpret;
      // assumes little-endian, the standard ARM configuration).
      uint16x4_t input_u16 = vdup_n_u16(0);
      input_u16 = vset_lane_u16((reinterpret_cast<const uint16*>(input_ptr))[0],
                                input_u16, 0);
      input_ptr += input_ptr_increment;
      input_u16 = vset_lane_u16((reinterpret_cast<const uint16*>(input_ptr))[0],
                                input_u16, 1);
      input_ptr += input_ptr_increment;
      const int16x4_t input_s16 = vreinterpret_s16_u16(
          vget_low_u16(vmovl_u8(vreinterpret_u8_u16(input_u16))));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer.
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle 1 output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x2_t acc = vld1_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      uint8x8_t input_u8 = vdup_n_u8(0);
      input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
      input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
      input_ptr += input_ptr_increment;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate; only the low two lanes carry this pixel.
      acc = vget_low_s32(vmlal_s16(vcombine_s32(acc, acc), filter, input));
      // Store the accumulators back to acc_buffer.
      vst1_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
// Specialization: strided access allowed, fixed input depth 4, depth
// multiplier 1. The main loop uses a full 8-byte load per pixel (reading 4
// bytes past the 4 it needs), so the last pixel is handled separately with
// per-byte lane inserts to avoid reading past the end of the input buffer.
template <>
struct QuantizedDepthwiseConvKernel<true, 4, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // The epilogue below unconditionally processes one pixel, so bail out
    // early when there is nothing to do.
    if (num_output_pixels <= 0) {
      return;
    }
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8 = vdup_n_u8(0);
    filter_u8 = vset_lane_u8(filter_ptr[0], filter_u8, 0);
    filter_u8 = vset_lane_u8(filter_ptr[1], filter_u8, 1);
    filter_u8 = vset_lane_u8(filter_ptr[2], filter_u8, 2);
    filter_u8 = vset_lane_u8(filter_ptr[3], filter_u8, 3);
    const int16x4_t filter_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(filter_u8)));
    const int16x4_t filter = vadd_s16(filter_s16, vdup_n_s16(filter_offset));
    int outp = 0;
    // Handle one output pixel at a time until second to the last pixel. Second
    // to the last because we read eight input pixels while only processing
    // four.
    for (; outp < num_output_pixels - 1; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc;
      acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      uint8x8_t input_u8 = vld1_u8(input_ptr);
      input_ptr += input_ptr_increment;
      const int16x4_t input_s16 =
          vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle the last output pixel with exact (non-over-reading) loads.
    // Load the accumulators from acc_buffer
    int32x4_t acc;
    acc = vld1q_s32(acc_buffer_ptr);
    // Load the inputs, add input_offset.
    uint8x8_t input_u8 = vdup_n_u8(0);
    input_u8 = vset_lane_u8(input_ptr[0], input_u8, 0);
    input_u8 = vset_lane_u8(input_ptr[1], input_u8, 1);
    input_u8 = vset_lane_u8(input_ptr[2], input_u8, 2);
    input_u8 = vset_lane_u8(input_ptr[3], input_u8, 3);
    const int16x4_t input_s16 =
        vreinterpret_s16_u16(vget_low_u16(vmovl_u8(input_u8)));
    const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
    // Multiply-accumulate
    acc = vmlal_s16(acc, filter, input);
    // Store the accumulators back to acc_buffer
    vst1q_s32(acc_buffer_ptr, acc);
  }
};
// Specialization: stride 1 only (kAllowStrided = false), fixed input depth
// 12, depth multiplier 1. Twelve taps are covered by two overlapping 8-byte
// loads (bytes 0..7 and 4..11); taps 0..3, 4..7 and 8..11 are taken from
// the low half of the first load, its high half, and the high half of the
// second load respectively. The same overlap trick is used for the inputs.
template <>
struct QuantizedDepthwiseConvKernel<false, 12, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const uint8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const uint8* filter_ptr,
                  int16 filter_offset, int32* acc_buffer_ptr) {
    // Load the filters, add filter_offset.
    uint8x8_t filter_u8_0 = vld1_u8(filter_ptr);
    uint8x8_t filter_u8_1 = vld1_u8(filter_ptr + 4);
    int16x8_t filter_s16_0 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_0));
    int16x8_t filter_s16_1 = vreinterpretq_s16_u16(vmovl_u8(filter_u8_1));
    filter_s16_0 = vaddq_s16(filter_s16_0, vdupq_n_s16(filter_offset));
    filter_s16_1 = vaddq_s16(filter_s16_1, vdupq_n_s16(filter_offset));
    int16x4_t filter_0 = vget_low_s16(filter_s16_0);
    int16x4_t filter_1 = vget_high_s16(filter_s16_0);
    int16x4_t filter_2 = vget_high_s16(filter_s16_1);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      uint8x8_t input_u8_0 = vld1_u8(input_ptr);
      uint8x8_t input_u8_1 = vld1_u8(input_ptr + 4);
      input_ptr += input_ptr_increment;
      int16x8_t input_0 = vreinterpretq_s16_u16(vmovl_u8(input_u8_0));
      int16x8_t input_1 = vreinterpretq_s16_u16(vmovl_u8(input_u8_1));
      input_0 = vaddq_s16(input_0, vdupq_n_s16(input_offset));
      input_1 = vaddq_s16(input_1, vdupq_n_s16(input_offset));
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      // Multiply-accumulate
      acc_0 = vmlal_s16(acc_0, vget_low_s16(input_0), filter_0);
      acc_1 = vmlal_s16(acc_1, vget_high_s16(input_0), filter_1);
      acc_2 = vmlal_s16(acc_2, vget_high_s16(input_1), filter_2);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      acc_buffer_ptr += 12;
    }
  }
};
#endif
// Accumulates the effect of one row of the filter, on a segment of one row
// of the output, accessing the corresponding one row of the input.
//
// Template parameters (enforced by the DCHECKs below):
//   kAllowStrided          - whether stride != 1 is permitted.
//   kFixedInputDepth       - compile-time input depth, or 0 for runtime depth.
//   kFixedDepthMultiplier  - compile-time depth multiplier (may not be 0 when
//                            kFixedInputDepth is 0).
// The per-pixel inner work is delegated to the matching
// QuantizedDepthwiseConvKernel specialization.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                    int input_depth, int input_width,
                                    const uint8* input_data, int16 input_offset,
                                    int pad_width, int depth_multiplier,
                                    int filter_width, const uint8* filter_data,
                                    int16 filter_offset, int out_x_buffer_start,
                                    int out_x_buffer_end, int output_depth,
                                    int32* acc_buffer) {
  ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
  // Consistency check parameters. This is important in particular to ensure
  // that we keep the number of template instantiations minimal, so we don't
  // increase binary size unnecessarily.
  static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
  static_assert(kFixedInputDepth || kAllowStrided, "");
  TFLITE_DCHECK(stride == 1 || kAllowStrided);
  if (kFixedInputDepth) {
    TFLITE_DCHECK_EQ(input_depth, kFixedInputDepth);
  }
  if (kFixedDepthMultiplier) {
    TFLITE_DCHECK_EQ(depth_multiplier, kFixedDepthMultiplier);
  }
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  const int input_ptr_increment = stride * input_depth;
  const uint8* filter_base_ptr = filter_data;
  for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
    // For the current (filter_x, filter_y) point in the filter,
    // compute the boundaries of the corresponding output row segment.
    // The unclamped bounds are the output x positions whose corresponding
    // input x (out_x * stride - pad_width + dilation_factor * filter_x)
    // lies inside [0, input_width).
    int out_x_loop_start_unclamped = 0;
    int out_x_loop_end_unclamped = 0;
    if (kAllowStrided) {
      // Strides 2 and 4 are special-cased so the divisions below are by
      // integer literals rather than a runtime variable.
      if (stride == 2) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 1) / 2;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 1) / 2;
      } else if (stride == 4) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 3) / 4;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 3) / 4;
      } else {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + stride - 1) / stride;
        out_x_loop_end_unclamped = (pad_width + input_width -
                                    dilation_factor * filter_x + stride - 1) /
                                   stride;
      }
    } else {
      // stride == 1: no division needed.
      out_x_loop_start_unclamped = pad_width - dilation_factor * filter_x;
      out_x_loop_end_unclamped =
          pad_width + input_width - dilation_factor * filter_x;
    }
    // The kernel will have to iterate on the segment of the
    // output row that starts at out_x_loop_start and out_x_loop_end.
    const int out_x_loop_start =
        std::max(out_x_buffer_start, out_x_loop_start_unclamped);
    const int out_x_loop_end =
        std::min(out_x_buffer_end, out_x_loop_end_unclamped);
    int32* acc_buffer_ptr =
        acc_buffer + (out_x_loop_start - out_x_buffer_start) * output_depth;
    const int in_x_origin =
        (out_x_loop_start * stride) - pad_width + dilation_factor * filter_x;
    const uint8* input_ptr = input_data + in_x_origin * input_depth;
    const int num_output_pixels = out_x_loop_end - out_x_loop_start;
    QuantizedDepthwiseConvKernel<
        kAllowStrided, kFixedInputDepth,
        kFixedDepthMultiplier>::Run(num_output_pixels, input_depth,
                                    depth_multiplier, input_ptr, input_offset,
                                    input_ptr_increment, filter_base_ptr,
                                    filter_offset, acc_buffer_ptr);
    // Advance to this filter_x's taps for the next filter row position.
    filter_base_ptr += output_depth;
  }
}
// generic fallback of DepthwiseConvAccumRow, portable, non-templatized.
inline void QuantizedDepthwiseConvAccumRowGeneric(
int stride, int dilation_factor, int input_depth, int input_width,
const uint8* input_data, int16 input_offset, int pad_width,
int depth_multiplier, int filter_width, const uint8* filter_data,
int16 filter_offset, int out_x_buffer_start, int out_x_buffer_end,
int output_depth, int32* acc_buffer) {
ruy::profiler::ScopeLabel label("DepthwiseConvAccumRowGeneric (slow)");
const uint8* filter_base_ptr = filter_data;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int out_x_loop_start = std::max(
out_x_buffer_start,
(pad_width - dilation_factor * filter_x + stride - 1) / stride);
const int out_x_loop_end = std::min(
out_x_buffer_end,
(pad_width + input_width - dilation_factor * filter_x + stride - 1) /
stride);
int32* acc_buffer_ptr =
acc_buffer + (out_x_loop_start - out_x_buffer_start) * output_depth;
const int in_x_origin =
(out_x_loop_start * stride) - pad_width + dilation_factor * filter_x;
const uint8* input_ptr = input_data + in_x_origin * input_depth;
const int input_ptr_increment = (stride - 1) * input_depth;
for (int out_x = out_x_loop_start; out_x < out_x_loop_end; out_x++) {
const uint8* filter_ptr = filter_base_ptr;
for (int ic = 0; ic < input_depth; ++ic) {
const int16 input_val = *input_ptr++ + input_offset;
for (int m = 0; m < depth_multiplier; m++) {
const int16 filter_val = *filter_ptr++ + filter_offset;
*acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
}
}
input_ptr += input_ptr_increment;
}
filter_base_ptr += output_depth;
}
}
// Initializes the accumulator buffer with bias values: each of the
// num_output_pixels pixels gets a copy of the output_depth bias values.
// NEON fast paths replicate the bias vector for the common depths
// (1, 2, 4, 8, 16); any remaining pixels - and every other depth - fall
// through to the per-pixel memcpy loop at the end.
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
                                       const int32* bias_data,
                                       int32* acc_buffer) {
  // i counts output pixels already initialized.
  int i = 0;
#ifdef USE_NEON
  if (output_depth == 1) {
    const int32x4_t b = vdupq_n_s32(bias_data[0]);
    // 16 pixels (16 int32s) per iteration, then 4 at a time.
    for (; i <= num_output_pixels - 16; i += 16) {
      vst1q_s32(acc_buffer + i + 0, b);
      vst1q_s32(acc_buffer + i + 4, b);
      vst1q_s32(acc_buffer + i + 8, b);
      vst1q_s32(acc_buffer + i + 12, b);
    }
    for (; i <= num_output_pixels - 4; i += 4) {
      vst1q_s32(acc_buffer + i, b);
    }
  } else if (output_depth == 2) {
    // Build the pattern [b0, b1, b0, b1] covering two pixels per vector.
    int32x4_t b = vdupq_n_s32(bias_data[0]);
    b = vsetq_lane_s32(bias_data[1], b, 1);
    b = vsetq_lane_s32(bias_data[1], b, 3);
    // 8 pixels (16 int32s) per iteration, then 2 pixels at a time.
    for (; i <= num_output_pixels - 8; i += 8) {
      vst1q_s32(acc_buffer + 2 * i + 0, b);
      vst1q_s32(acc_buffer + 2 * i + 4, b);
      vst1q_s32(acc_buffer + 2 * i + 8, b);
      vst1q_s32(acc_buffer + 2 * i + 12, b);
    }
    for (; i <= num_output_pixels - 2; i += 2) {
      vst1q_s32(acc_buffer + 2 * i, b);
    }
  } else if (output_depth == 4) {
    const int32x4_t b = vld1q_s32(bias_data);
    // 4 pixels (16 int32s) per iteration, then 1 pixel at a time.
    for (; i <= num_output_pixels - 4; i += 4) {
      vst1q_s32(acc_buffer + 4 * i + 0, b);
      vst1q_s32(acc_buffer + 4 * i + 4, b);
      vst1q_s32(acc_buffer + 4 * i + 8, b);
      vst1q_s32(acc_buffer + 4 * i + 12, b);
    }
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 4 * i, b);
    }
  } else if (output_depth == 8) {
    const int32x4_t b0 = vld1q_s32(bias_data);
    const int32x4_t b1 = vld1q_s32(bias_data + 4);
    // 2 pixels per iteration, then 1 pixel at a time.
    for (; i <= num_output_pixels - 2; i += 2) {
      vst1q_s32(acc_buffer + 8 * i + 0, b0);
      vst1q_s32(acc_buffer + 8 * i + 4, b1);
      vst1q_s32(acc_buffer + 8 * i + 8, b0);
      vst1q_s32(acc_buffer + 8 * i + 12, b1);
    }
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 8 * i + 0, b0);
      vst1q_s32(acc_buffer + 8 * i + 4, b1);
    }
  } else if (output_depth == 16) {
    const int32x4_t b0 = vld1q_s32(bias_data);
    const int32x4_t b1 = vld1q_s32(bias_data + 4);
    const int32x4_t b2 = vld1q_s32(bias_data + 8);
    const int32x4_t b3 = vld1q_s32(bias_data + 12);
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 16 * i + 0, b0);
      vst1q_s32(acc_buffer + 16 * i + 4, b1);
      vst1q_s32(acc_buffer + 16 * i + 8, b2);
      vst1q_s32(acc_buffer + 16 * i + 12, b3);
    }
  }
#endif
  // Portable fallback / remainder: copy the bias row into each remaining
  // pixel's slice of the accumulator buffer.
  for (; i < num_output_pixels; i++) {
    memcpy(acc_buffer + i * output_depth, bias_data,
           sizeof(acc_buffer[0]) * output_depth);
  }
}
inline void DepthwiseConvGeneral(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, int thread_start, int thread_end, int thread_dim) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
const int depth_multiplier = params.depth_multiplier;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int input_depth = input_shape.Dims(3);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
#ifdef USE_NEON
const bool shift_left = (output_shift > 0);
const int32 multiplier_power_of_two = shift_left ? (1 << output_shift) : 1;
#endif
// The default Accbuffer size is 2048, will allocate a bigger memory if it's
// not enough.
// TODO(b/136089667): If output_depth > 2048 happens a lot, we should just use
// a scratch tensor.
static const int kStackAccBufferSize = 2048;
int acc_buffer_size = kStackAccBufferSize;
int32 stack_acc_buffer[kStackAccBufferSize];
int32* acc_buffer = stack_acc_buffer;
std::unique_ptr<int32[]> heap_acc_buffer;
if (kStackAccBufferSize < output_depth) {
heap_acc_buffer.reset(new int32[output_depth]);
acc_buffer = heap_acc_buffer.get();
acc_buffer_size = output_depth;
}
const int kOutputPixelsInAccBuffer = acc_buffer_size / output_depth;
const int acc_buffer_size_actually_used =
kOutputPixelsInAccBuffer * output_depth;
TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
acc_buffer_size_actually_used);
TFLITE_DCHECK_LE(acc_buffer_size_actually_used, acc_buffer_size);
TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
// row_accum_func will point to the core accumulation function to be used
// for this DepthwiseConv op.
using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
row_accum_func_t row_accum_func = nullptr;
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
FIXED_DEPTH_MULTIPLIER) \
if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) && \
(input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) && \
depth_multiplier == FIXED_DEPTH_MULTIPLIER) { \
row_accum_func = \
QuantizedDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
FIXED_DEPTH_MULTIPLIER>; \
}
#ifdef USE_NEON
// We go over our list of kernels by decreasing order of preference
// for the cases where multiple kernels could apply.
// Start with the fastest kernels: AllowStrided=false, fixed input depth.
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 4)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 4)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 8)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(false, 12, 1)
// Next come the strided kernels: AllowStrided=true, fixed input depth.
// They are a bit less efficient, but allow stride!=1.
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 16, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 16)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
// Finally, the kernels allowing a variable input depth,
// these are the least efficient but most general kernels.
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 3)
#endif // USE_NEON
// No matching fast kernel found, use slow fallback.
if (!row_accum_func) {
row_accum_func = QuantizedDepthwiseConvAccumRowGeneric;
}
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
const int input_batch_stride = input_height_stride * input_shape.Dims(1);
const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
// Now that we have determined row_accum_func, we can start work.
int batch_start = 0;
int batch_end = batches;
int row_start = 0;
int row_end = output_height;
int output_ptr_offset = 0;
switch (thread_dim) {
case 0:
// Multithread along with the batch axis
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, batches);
batch_start = thread_start;
batch_end = thread_end;
output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
break;
case 1:
// Multithread along with the row axis
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, output_height);
row_start = thread_start;
row_end = thread_end;
output_ptr_offset = row_start * output_width * output_depth;
break;
}
uint8* output_ptr = output_data + output_ptr_offset;
int batch_step =
(output_height + row_start - row_end) * output_width * output_depth;
for (int b = batch_start; b < batch_end; ++b) {
for (int out_y = row_start; out_y < row_end; ++out_y) {
const int in_y_origin = (out_y * stride_height) - pad_height;
const int filter_y_start =
std::max(0, (-in_y_origin + dilation_height_factor - 1) /
dilation_height_factor);
const int filter_y_end =
std::min(filter_height,
(input_height - in_y_origin + dilation_height_factor - 1) /
dilation_height_factor);
for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
out_x_buffer_start += kOutputPixelsInAccBuffer) {
const int out_x_buffer_end = std::min(
output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
// We call a 'pixel' a group of activation that share all but the
// 'depth'/'channel' coordinate. num_output_pixels is the number of
// output pixels that we will accumulate in this loop iteration.
const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
// Initialize our local accumulator with the bias values, so we don't
// have to add them later.
DepthwiseConvInitAccBuffer(num_output_pixels, output_depth, bias_data,
acc_buffer);
// Accumulation loop. Most of the time should be spent in here.
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
row_accum_func(
stride_width, dilation_width_factor, input_depth, input_width,
input_data + in_y * input_height_stride + b * input_batch_stride,
input_offset, pad_width, depth_multiplier, filter_width,
filter_data + filter_y * filter_height_stride, filter_offset,
out_x_buffer_start, out_x_buffer_end, output_depth, acc_buffer);
}
// Finished accumulating int32 values. Now need to convert them to
// the final 8bit form and store them.
ruy::profiler::ScopeLabel label("downquantize+store");
const int num_output_values = output_depth * num_output_pixels;
int i = 0;
#ifdef USE_NEON
using gemmlowp::RoundingDivideByPOT;
const int32x4_t output_offset_vec = vdupq_n_s32(output_offset);
const int32x4_t output_activation_min_vec =
vdupq_n_s32(output_activation_min);
const int32x4_t output_activation_max_vec =
vdupq_n_s32(output_activation_max);
// Handle 16 values at once.
// This allows us to issue 4 mutually independent int32
// multiplications (vqrdmulh), which should alleviate most of their
// high latency.
for (; i <= num_output_values - 16; i += 16) {
int32x4_t acc[4];
for (int j = 0; j < 4; j++) {
acc[j] = vld1q_s32(acc_buffer + i + 4 * j);
}
if (!shift_left) {
// Fixed-point multiplication.
for (int j = 0; j < 4; j++) {
acc[j] = vqrdmulhq_n_s32(acc[j], output_multiplier);
}
for (int j = 0; j < 4; j++) {
acc[j] = RoundingDivideByPOT(acc[j], -output_shift);
}
} else {
// Fixed-point multiplication.
for (int j = 0; j < 4; j++) {
acc[j] = vmulq_n_s32(acc[j], multiplier_power_of_two);
acc[j] = vqrdmulhq_n_s32(acc[j], output_multiplier);
}
}
// Add the output offset.
for (int j = 0; j < 4; j++) {
acc[j] = vaddq_s32(acc[j], output_offset_vec);
}
// Apply the activation function.
for (int j = 0; j < 4; j++) {
acc[j] = vmaxq_s32(acc[j], output_activation_min_vec);
}
for (int j = 0; j < 4; j++) {
acc[j] = vminq_s32(acc[j], output_activation_max_vec);
}
// Saturating cast to uint8 and store to destination.
int16x4_t acc_s16[4];
for (int j = 0; j < 4; j++) {
acc_s16[j] = vqmovn_s32(acc[j]);
}
const int16x8_t res_s16_0 = vcombine_s16(acc_s16[0], acc_s16[1]);
const int16x8_t res_s16_1 = vcombine_s16(acc_s16[2], acc_s16[3]);
const uint8x8_t res_u8_0 = vqmovun_s16(res_s16_0);
const uint8x8_t res_u8_1 = vqmovun_s16(res_s16_1);
vst1q_u8(output_ptr, vcombine_u8(res_u8_0, res_u8_1));
output_ptr += 16;
}
// Handle 8 values at once.
// Not as good as 16 (now we're only issuing 2 mutually independent
// vqrdmulh instructions, so we're probably paying for their high
// latency).
for (; i <= num_output_values - 8; i += 8) {
int32x4_t acc0 = vld1q_s32(acc_buffer + i);
int32x4_t acc1 = vld1q_s32(acc_buffer + i + 4);
if (!shift_left) {
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
// Rounding right shift.
acc0 = RoundingDivideByPOT(acc0, -output_shift);
acc1 = RoundingDivideByPOT(acc1, -output_shift);
} else {
// Fixed-point multiplication.
acc0 = vmulq_n_s32(acc0, multiplier_power_of_two);
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc1 = vmulq_n_s32(acc1, multiplier_power_of_two);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
}
// Add the output offset.
acc0 = vaddq_s32(acc0, output_offset_vec);
acc1 = vaddq_s32(acc1, output_offset_vec);
// Apply the activation function.
acc0 = vmaxq_s32(acc0, output_activation_min_vec);
acc1 = vmaxq_s32(acc1, output_activation_min_vec);
acc0 = vminq_s32(acc0, output_activation_max_vec);
acc1 = vminq_s32(acc1, output_activation_max_vec);
// Saturating cast to uint8 and store to destination.
const int16x4_t acc0_s16 = vqmovn_s32(acc0);
const int16x4_t acc1_s16 = vqmovn_s32(acc1);
const int16x8_t res_s16 = vcombine_s16(acc0_s16, acc1_s16);
const uint8x8_t res_u8 = vqmovun_s16(res_s16);
vst1_u8(output_ptr, res_u8);
output_ptr += 8;
}
// Handle 4 values at once. Now we're paying the full price of the
// high latency of vqrdmulh. Also, storing only 4 bytes at the end
// (without any alignment) can only be done 1 byte at a time.
// Yet, that is still worth doing to minimize the amount of leftover
// that will have to go through the very slow scalar code.
for (; i <= num_output_values - 4; i += 4) {
int32x4_t acc = vld1q_s32(acc_buffer + i);
if (!shift_left) {
// Fixed-point multiplication.
acc = vqrdmulhq_n_s32(acc, output_multiplier);
// Rounding right shift.
acc = RoundingDivideByPOT(acc, -output_shift);
} else {
// Fixed-point multiplication.
acc = vmulq_n_s32(acc, multiplier_power_of_two);
acc = vqrdmulhq_n_s32(acc, output_multiplier);
}
// Add the output offset.
acc = vaddq_s32(acc, output_offset_vec);
// Apply the activation function.
acc = vmaxq_s32(acc, output_activation_min_vec);
acc = vminq_s32(acc, output_activation_max_vec);
// Saturating cast to uint8 and store to destination.
const int16x4_t acc_s16 = vqmovn_s32(acc);
const int16x8_t res_s16 = vcombine_s16(acc_s16, acc_s16);
const uint8x8_t res_u8 = vqmovun_s16(res_s16);
vst1_lane_u8(output_ptr + 0, res_u8, 0);
vst1_lane_u8(output_ptr + 1, res_u8, 1);
vst1_lane_u8(output_ptr + 2, res_u8, 2);
vst1_lane_u8(output_ptr + 3, res_u8, 3);
output_ptr += 4;
}
#endif // USE_NEON
// Handle leftover values, one by one. This is very slow.
for (; i < num_output_values; i++) {
int32 acc = acc_buffer[i];
acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
output_shift);
acc += output_offset;
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
*output_ptr++ = static_cast<uint8>(acc);
}
}
}
output_ptr += batch_step;
}
}
} // namespace depthwise_conv
// Dispatches an 8-bit depthwise convolution to the fastest applicable kernel,
// in decreasing order of preference:
//   1. NEON dot-product 3x3 kernels (aarch64 + Android/clang only, and only
//      when the CPU reports dot-product support and the parameter shape is
//      categorized as a supported kernel type).
//   2. The generic NEON 3x3 filter kernel (aarch64, non-L4T, when
//      Fast3x3FilterKernelSupported() accepts the parameters).
//   3. DepthwiseConvGeneral, the portable fallback.
// kOutputRounding selects the requantization rounding mode used by the 3x3
// kernel. thread_start/thread_end/thread_dim describe this worker's slice of
// the output (thread_dim 0 = batch axis, 1 = output-row axis, as consumed by
// DepthwiseConvGeneral).
template <DepthwiseConvOutputRounding kOutputRounding>
inline void DepthwiseConvWithRounding(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, const CpuFlags& cpu_flags, int thread_start,
    int thread_end, int thread_dim) {
  ruy::profiler::ScopeLabel label("DepthwiseConv/8bit");
  const int depth_multiplier = params.depth_multiplier;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  // Sanity-check the 4D NHWC shapes and quantization parameters before
  // dispatching; all kernels below rely on these invariants.
  TFLITE_DCHECK_GE(dilation_width_factor, 1);
  TFLITE_DCHECK_GE(dilation_height_factor, 1);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_depth = input_shape.Dims(3);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
#if defined(__ANDROID__) && defined(__clang__)
  // Dispatch to dot-product 3x3 kernels when supported.
  if (cpu_flags.neon_dotprod) {
    using optimized_ops::depthwise_conv::DotProduct3x3KernelType;
    // Categorization inspects shapes/strides/padding and returns kNone when
    // no dot-product kernel applies.
    DotProduct3x3KernelType kernel_type =
        optimized_ops::depthwise_conv::CategorizeDotProductKernel(
            input_shape, filter_shape, output_shape, params);
    if (kernel_type != DotProduct3x3KernelType::kNone) {
      ruy::profiler::ScopeLabel specialized_label(
          "DepthwiseConv/8bit/3x3XDotProduct");
      optimized_ops::depthwise_conv::DepthwiseConvDotProduct3x3<
          DepthwiseConvImplementation::kUseNeon3x3DotProduct>(
          params, input_shape, input_data, filter_shape, filter_data,
          bias_shape, bias_data, output_shape, output_data, thread_start,
          thread_end, thread_dim);
      return;
    }
  }
#endif
  // Dispatch to non-dot-product 3x3 kernels when supported.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int output_shift = params.output_shift;
  // Call kernel optimized for depthwise convolutions using 3x3 filters if
  // parameters are supported.
  if (depthwise_conv::Fast3x3FilterKernelSupported(
          input_shape, filter_shape, stride_width, stride_height,
          dilation_width_factor, dilation_height_factor, pad_width, pad_height,
          depth_multiplier, output_shape, output_shift)) {
    ruy::profiler::ScopeLabel specialized_label("DepthwiseConv/8bit/3x3");
    depthwise_conv::DepthwiseConv3x3Filter<kOutputRounding>(
        params, input_shape, input_data, filter_shape, filter_data, bias_shape,
        bias_data, output_shape, output_data, thread_start, thread_end,
        thread_dim);
    return;
  }
#endif
  // Portable fallback: handles any parameter combination.
  ruy::profiler::ScopeLabel specialized_label("DepthwiseConv/8bit/General");
  depthwise_conv::DepthwiseConvGeneral(params, input_shape, input_data,
                                       filter_shape, filter_data, bias_shape,
                                       bias_data, output_shape, output_data,
                                       thread_start, thread_end, thread_dim);
}
inline void DepthwiseConvImpl(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, const CpuFlags& cpu_flags, int thread_start,
int thread_end, int thread_dim) {
return DepthwiseConvWithRounding<DepthwiseConvOutputRounding::kUpward>(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data, cpu_flags, thread_start, thread_end,
thread_dim);
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h | C++ | apache-2.0 | 93,715 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_3X3_FILTER_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_3X3_FILTER_H_
#include <memory>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
#ifdef USE_NEON
// Loads 16 bytes of uint8 data and reinterprets the bit pattern as int8
// lanes (no value conversion is performed).
inline int8x16_t util_vld1q_x8(const uint8* data_addr) {
  const uint8x16_t loaded = vld1q_u8(data_addr);
  return vreinterpretq_s8_u8(loaded);
}
// int8 overload: the data is already signed, so a plain 16-byte load
// suffices without reinterpretation.
inline int8x16_t util_vld1q_x8(const int8* data_addr) {
  return vld1q_s8(data_addr);
}
// Loads 8 bytes of uint8 data and reinterprets the bit pattern as int8
// lanes (no value conversion is performed).
inline int8x8_t util_vld1_x8(const uint8* data_addr) {
  const uint8x8_t loaded = vld1_u8(data_addr);
  return vreinterpret_s8_u8(loaded);
}
// int8 overload: the data is already signed, so a plain 8-byte load
// suffices without reinterpretation.
inline int8x8_t util_vld1_x8(const int8* data_addr) {
  return vld1_s8(data_addr);
}
#endif
#define STR(s) STR_UNEXPANDED(s)
#define STR_UNEXPANDED(s) #s
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
#include <stddef.h>
// Lane operations are for clarity and convenience. We want to load and store
// 4 8-bit lanes together. So these are treated much like 32-bit loads and
// 32-bit stores. Stores require 32-bit alignment.
#define vst1_lane_8x4(dst, reg, lane_num) \
TFLITE_DCHECK_EQ(reinterpret_cast<std::uintptr_t>(dst) % 4, 0); \
vst1_lane_s32(reinterpret_cast<int32_t*>(dst), vreinterpret_s32_s8(reg), \
lane_num)
#define vst1q_lane_8x4(dst, reg, lane_num) \
TFLITE_DCHECK_EQ(reinterpret_cast<std::uintptr_t>(dst) % 4, 0); \
vst1q_lane_u32(reinterpret_cast<uint32_t*>(dst), reg, lane_num)
// Important! Most compilation configurations will compile and run without
// reinterpret_cast. Sanitizers may fail silently on lane-loading, with an
// obscure bug or mis-feature probably in unhygienic macro expansion.
#define vld1q_lane_s8x8(src, reg, lane_num) \
vreinterpretq_s8_s64(vld1q_lane_s64(reinterpret_cast<const int64_t*>(src), \
vreinterpretq_s64_s8(reg), lane_num))
#define vld1_lane_8x4(src, reg, lane_num) \
vreinterpret_s8_s32(vld1_lane_s32(reinterpret_cast<const int32*>(src), \
vreinterpret_s32_s8(reg), lane_num))
#define vld1q_lane_8x4(src, reg, lane_num) \
vld1q_lane_s32(reinterpret_cast<const int32*>(src), reg, lane_num)
#define vld1q_dup_s8x4(src) vld1q_dup_s32(reinterpret_cast<const int32*>(src))
// Represents the number of bytes offset from the start of the
// DepthwiseConvParams struct. This is used in the asm to load parameters.
// Keep these values in sync with the static_asserts below.
#define OFFSET_INPUT_DEPTH 0
#define OFFSET_INPUT_ROW_SIZE 8
#define OFFSET_OUTPUT_DEPTH 16
#define OFFSET_OUTPUT_ROW_SIZE 24
#define OFFSET_FILTER_ROW_SIZE 32
#define OFFSET_INPUT_OFFSET 40
#define OFFSET_OUTPUT_OFFSET 44
#define OFFSET_FILTER_OFFSET 48
#define OFFSET_OUTPUT_MULTIPLIER 52
#define OFFSET_OUTPUT_ACTIVATION_MIN 56
#define OFFSET_OUTPUT_ACTIVATION_MAX 60
#define OFFSET_OUTPUT_RIGHT_SHIFT 64
#define OFFSET_INPUT_WIDTH 68
#define OFFSET_INPUT_HEIGHT 72
#define OFFSET_STRIDE_WIDTH 76
#define OFFSET_STRIDE_HEIGHT 80
#define OFFSET_OUTPUT_WIDTH 84
#define OFFSET_OUTPUT_HEIGHT 88
// Compile-time layout guards: the OFFSET_* byte offsets defined above are
// hard-coded into handwritten inline asm, so any change to the field order or
// padding of DepthwiseConvParams must be mirrored in those constants. These
// asserts turn such a drift into a compile error instead of silent
// miscomputation.
static_assert(offsetof(DepthwiseConvParams, input_depth) == OFFSET_INPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_row_size) ==
                  OFFSET_INPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, output_depth) ==
                  OFFSET_OUTPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_row_size) ==
                  OFFSET_OUTPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, filter_row_size) ==
                  OFFSET_FILTER_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, input_offset) ==
                  OFFSET_INPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, output_offset) ==
                  OFFSET_OUTPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, filter_offset) ==
                  OFFSET_FILTER_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, output_multiplier) ==
                  OFFSET_OUTPUT_MULTIPLIER,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_min) ==
                  OFFSET_OUTPUT_ACTIVATION_MIN,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_max) ==
                  OFFSET_OUTPUT_ACTIVATION_MAX,
              "");
static_assert(offsetof(DepthwiseConvParams, output_right_shift) ==
                  OFFSET_OUTPUT_RIGHT_SHIFT,
              "");
static_assert(offsetof(DepthwiseConvParams, input_width) == OFFSET_INPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_height) ==
                  OFFSET_INPUT_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_width) ==
                  OFFSET_STRIDE_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_height) ==
                  OFFSET_STRIDE_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, output_width) ==
                  OFFSET_OUTPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_height) ==
                  OFFSET_OUTPUT_HEIGHT,
              "");
// Dot product ops hard-coded
// Represents the number of bytes offset from the start of the
// DepthwiseConvDotProdParams struct. This is used in the asm to load
// parameters. Keep these values in sync with the static_asserts below.
#define DP_OFFSET_INPUT_DEPTH 0
#define DP_OFFSET_OUTPUT_DEPTH DP_OFFSET_INPUT_DEPTH + 8
#define DP_OFFSET_STRIDE DP_OFFSET_OUTPUT_DEPTH + 8
#define DP_OFFSET_BIAS_INCREMENT DP_OFFSET_STRIDE + 4
//
#define DP_OFFSET_INPUT_OFFSET 24
#define DP_OFFSET_OUTPUT_OFFSET DP_OFFSET_INPUT_OFFSET + 4
#define DP_OFFSET_OUTPUT_MULTIPLIER DP_OFFSET_OUTPUT_OFFSET + 4
#define DP_OFFSET_OUTPUT_SHIFT DP_OFFSET_OUTPUT_MULTIPLIER + 4
#define DP_OFFSET_QUANTIZED_ACTIVATION_MIN DP_OFFSET_OUTPUT_SHIFT + 4
#define DP_OFFSET_QUANTIZED_ACTIVATION_MAX \
DP_OFFSET_QUANTIZED_ACTIVATION_MIN + 4
//
#define DP_OFFSET_PADDING_LEFT 48
#define DP_OFFSET_PADDING_RIGHT DP_OFFSET_PADDING_LEFT + 4
#define DP_OFFSET_PADDING_TOP DP_OFFSET_PADDING_RIGHT + 4
#define DP_OFFSET_PADDING_BOTTOM DP_OFFSET_PADDING_TOP + 4
//
#define DP_OFFSET_DEPTH_MICRO_REPEATS DP_OFFSET_PADDING_BOTTOM + 4
//
#define DP_OFFSET_WIDTH_MACRO_COUNT 68
#define DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS \
DP_OFFSET_WIDTH_MACRO_COUNT + 4
#define DP_OFFSET_INPUT_WIDTH_MICRO_REPEATS \
DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS + 4
#define DP_OFFSET_RESIDUAL_WIDTH DP_OFFSET_INPUT_WIDTH_MICRO_REPEATS + 4
#define DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS \
DP_OFFSET_RESIDUAL_WIDTH + 4
#define DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS \
DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS + 4
#define DP_OFFSET_OUTPUT_RESIDUAL_WIDTH DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS + 4
#define DP_OFFSET_WORKSPACE_WIDTH_MICRO_REPEATS \
DP_OFFSET_OUTPUT_RESIDUAL_WIDTH + 4
//
#define DP_OFFSET_HEIGHT_MACRO_COUNT 100
#define DP_OFFSET_INBOUND_BLOCK_HEIGHT DP_OFFSET_HEIGHT_MACRO_COUNT + 4
#define DP_OFFSET_OUTBOUND_BLOCK_HEIGHT DP_OFFSET_INBOUND_BLOCK_HEIGHT + 4
#define DP_OFFSET_INPUT_HEIGHT_STRIDE DP_OFFSET_OUTBOUND_BLOCK_HEIGHT + 4
#define DP_OFFSET_OUTPUT_HEIGHT_STRIDE DP_OFFSET_INPUT_HEIGHT_STRIDE + 4
#define DP_OFFSET_WORKSPACE_HEIGHT_STRIDE DP_OFFSET_OUTPUT_HEIGHT_STRIDE + 4
//
#define DP_OFFSET_FOUR_OVER_STRIDE DP_OFFSET_WORKSPACE_HEIGHT_STRIDE + 4
//
#define DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL DP_OFFSET_FOUR_OVER_STRIDE + 4
#define DP_OFFSET_OUTPUT_SHIFT_PER_CHANNEL \
DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL + 8
// Compile-time layout guards for the dot-product kernel parameter struct:
// the DP_OFFSET_* constants above are consumed by handwritten asm, so the
// DepthwiseConvDotProdParams field layout must match them exactly.
static_assert(offsetof(DepthwiseConvDotProdParams, input_depth) ==
                  DP_OFFSET_INPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_depth) ==
                  DP_OFFSET_OUTPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, stride) == DP_OFFSET_STRIDE,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, bias_increment) ==
                  DP_OFFSET_BIAS_INCREMENT,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, input_offset) ==
                  DP_OFFSET_INPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_offset) ==
                  DP_OFFSET_OUTPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_multiplier) ==
                  DP_OFFSET_OUTPUT_MULTIPLIER,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_shift) ==
                  DP_OFFSET_OUTPUT_SHIFT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, quantized_activation_min) ==
                  DP_OFFSET_QUANTIZED_ACTIVATION_MIN,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, quantized_activation_max) ==
                  DP_OFFSET_QUANTIZED_ACTIVATION_MAX,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, padding_left) ==
                  DP_OFFSET_PADDING_LEFT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, padding_right) ==
                  DP_OFFSET_PADDING_RIGHT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, padding_top) ==
                  DP_OFFSET_PADDING_TOP,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, padding_bottom) ==
                  DP_OFFSET_PADDING_BOTTOM,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, depth_micro_repeats) ==
                  DP_OFFSET_DEPTH_MICRO_REPEATS,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, width_macro_count) ==
                  DP_OFFSET_WIDTH_MACRO_COUNT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams,
                       input_width_overall_micro_repeats) ==
                  DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, input_width_micro_repeats) ==
                  DP_OFFSET_INPUT_WIDTH_MICRO_REPEATS,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, residual_width) ==
                  DP_OFFSET_RESIDUAL_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams,
                       output_width_overall_micro_repeats) ==
                  DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams,
                       output_width_micro_repeats) ==
                  DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_residual_width) ==
                  DP_OFFSET_OUTPUT_RESIDUAL_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams,
                       workspace_width_micro_repeats) ==
                  DP_OFFSET_WORKSPACE_WIDTH_MICRO_REPEATS,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, height_macro_count) ==
                  DP_OFFSET_HEIGHT_MACRO_COUNT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, inbound_block_height) ==
                  DP_OFFSET_INBOUND_BLOCK_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, outbound_block_height) ==
                  DP_OFFSET_OUTBOUND_BLOCK_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, input_height_stride) ==
                  DP_OFFSET_INPUT_HEIGHT_STRIDE,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_height_stride) ==
                  DP_OFFSET_OUTPUT_HEIGHT_STRIDE,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, workspace_height_stride) ==
                  DP_OFFSET_WORKSPACE_HEIGHT_STRIDE,
              "");
//
static_assert(offsetof(DepthwiseConvDotProdParams, four_over_stride) ==
                  DP_OFFSET_FOUR_OVER_STRIDE,
              "");
//
// NOTE(review): the macro name below spells "MULTPLIPLIER" (a typo in its
// definition above); it is kept as-is because renaming the macro would
// require touching every use site, possibly outside this file.
static_assert(offsetof(DepthwiseConvDotProdParams,
                       output_multiplier_per_channel) ==
                  DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL,
              "");
static_assert(offsetof(DepthwiseConvDotProdParams, output_shift_per_channel) ==
                  DP_OFFSET_OUTPUT_SHIFT_PER_CHANNEL,
              "");
#endif // __aarch64__ && !GOOGLE_L4T - Dot product ops hard-coded
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
template <>
struct DepthwiseConvWindow<DepthwiseConvOutputRounding::kAwayFromZero, 8, 1,
1> {
public:
static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
const int32* bias_ptr, uint8* output_ptr,
int64_t input_depth, int64_t input_row_size,
int32 output_window_height, int32 output_window_width,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 2 * input_depth;
const int64_t input_height_increment = 2 * input_row_size;
const int64_t output_height_increment = 2 * params_ptr->output_row_size;
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time, load inputs for a 2x1 (2
// height, 1 width) output window (4x3 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 2x1 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time, load inputs for a 1x2 (1
// height, 2 width) output window (3x4 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 1x2 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"ldr x3, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"cmp %w[output_window_height], #2\n"
"dup v26.8h, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
"ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
"dup v27.4s, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
"dup v29.8h, w2\n"
"ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
"dup v30.16b, w4\n"
"ldr w0, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v31.16b, w0\n"
"dup v28.4s, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
"add x10, %[bias_ptr], #16\n"
"ldr x1, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"dup v9.8h, w9\n"
// Load filters and add offsets.
"ld1 {v0.8b}, [%[filter_ptr]], x3\n"
"ld1 {v1.8b}, [%[filter_ptr]], x3\n"
"uaddw v0.8h, v9.8h, v0.8b\n"
"ld1 {v2.8b}, [%[filter_ptr]], x3\n"
"uaddw v1.8h, v9.8h, v1.8b\n"
"ld1 {v3.8b}, [%[filter_ptr]], x3\n"
"uaddw v2.8h, v9.8h, v2.8b\n"
"ld1 {v4.8b}, [%[filter_ptr]], x3\n"
"uaddw v3.8h, v9.8h, v3.8b\n"
"ld1 {v5.8b}, [%[filter_ptr]], x3\n"
"uaddw v4.8h, v9.8h, v4.8b\n"
"ld1 {v6.8b}, [%[filter_ptr]], x3\n"
"uaddw v5.8h, v9.8h, v5.8b\n"
"ld1 {v7.8b}, [%[filter_ptr]], x3\n"
"uaddw v6.8h, v9.8h, v6.8b\n"
"ld1 {v8.8b}, [%[filter_ptr]], x3\n"
"uaddw v7.8h, v9.8h, v7.8b\n"
"uaddw v8.8h, v9.8h, v8.8b\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// This loop processes 2x2 outputs. To avoid register exhaustion,
// inputs for the left 2 outputs are loaded first, then the right
// two outputs.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, x11, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 2x1 outputs (2 height,
// 1 width) in anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
// loads, otherwise jump to specific the appropriate label to handle
// smaller widths.
"cmp w5, #2\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v22.4s}, [x10]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"subs w5, w5, #2\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"cmp w5, #3\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"mov x12, x11\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"add x13, x11, %[input_row_size]\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"add x14, x13, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v9.16b, v21.16b, v28.16b\n"
"and v12.16b, v22.16b, v28.16b\n"
"and v15.16b, v23.16b, v28.16b\n"
"and v18.16b, v24.16b, v28.16b\n"
"sshr v9.4s, v9.4s, #31\n"
"sshr v12.4s, v12.4s, #31\n"
"sshr v15.4s, v15.4s, #31\n"
"sshr v18.4s, v18.4s, #31\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v12.4s\n"
"sqadd v23.4s, v23.4s, v15.4s\n"
"sqadd v24.4s, v24.4s, v18.4s\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x12, %[input_ptr]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w5, #2\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"ld1 {v22.4s}, [x10]\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
// Load inputs for 3x4 input window which corresponds to a 1x2 output
// window.
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"subs w5, w5, #2\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"cmp w5, #3\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"add %[input_ptr], %[input_ptr], %[input_width_increment]\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"mov x12, %[input_ptr]\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"add x14, x13, %[input_row_size]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [%[output_ptr]], x3\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [%[output_ptr]], x3\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v25.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v25.4s, v25.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v25.4s\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w4\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w0\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [%[output_ptr]], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [%[output_ptr]], x3\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"and v9.16b, v21.16b, v28.16b\n"
"and v12.16b, v22.16b, v28.16b\n"
"sshr v9.4s, v9.4s, #31\n"
"sshr v12.4s, v12.4s, #31\n"
"sqadd v21.4s, v21.4s, v9.4s\n"
"sqadd v22.4s, v22.4s, v12.4s\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"umax v21.8b, v21.8b, v30.8b\n"
"umin v21.8b, v21.8b, v31.8b\n"
"st1 {v21.8b}, [%[output_ptr]]\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height)
:
// Inputs.
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
template <>
struct DepthwiseConvWindow<DepthwiseConvOutputRounding::kUpward, 8, 1, 1> {
public:
static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
const int32* bias_ptr, uint8* output_ptr,
int64_t input_depth, int64_t input_row_size,
int32 output_window_height, int32 output_window_width,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 2 * input_depth;
const int64_t input_height_increment = 2 * input_row_size;
const int64_t output_height_increment = 2 * params_ptr->output_row_size;
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time, load inputs for a 2x1 (2
// height, 1 width) output window (4x3 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 2x1 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time, load inputs for a 1x2 (1
// height, 2 width) output window (3x4 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 1x2 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"ldr x3, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"cmp %w[output_window_height], #2\n"
"dup v26.8h, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
"ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
"dup v27.4s, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
"dup v29.8h, w2\n"
"ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
"dup v30.16b, w4\n"
"ldr w0, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v31.16b, w0\n"
"dup v28.4s, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
"add x10, %[bias_ptr], #16\n"
"ldr x1, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"dup v9.8h, w9\n"
// Load filters and add offsets.
"ld1 {v0.8b}, [%[filter_ptr]], x3\n"
"ld1 {v1.8b}, [%[filter_ptr]], x3\n"
"uaddw v0.8h, v9.8h, v0.8b\n"
"ld1 {v2.8b}, [%[filter_ptr]], x3\n"
"uaddw v1.8h, v9.8h, v1.8b\n"
"ld1 {v3.8b}, [%[filter_ptr]], x3\n"
"uaddw v2.8h, v9.8h, v2.8b\n"
"ld1 {v4.8b}, [%[filter_ptr]], x3\n"
"uaddw v3.8h, v9.8h, v3.8b\n"
"ld1 {v5.8b}, [%[filter_ptr]], x3\n"
"uaddw v4.8h, v9.8h, v4.8b\n"
"ld1 {v6.8b}, [%[filter_ptr]], x3\n"
"uaddw v5.8h, v9.8h, v5.8b\n"
"ld1 {v7.8b}, [%[filter_ptr]], x3\n"
"uaddw v6.8h, v9.8h, v6.8b\n"
"ld1 {v8.8b}, [%[filter_ptr]], x3\n"
"uaddw v7.8h, v9.8h, v7.8b\n"
"uaddw v8.8h, v9.8h, v8.8b\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// This loop processes 2x2 outputs. To avoid register exhaustion,
// inputs for the left 2 outputs are loaded first, then the right
// two outputs.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, x11, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 2x1 outputs (2 height,
// 1 width) in anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
        // loads, otherwise jump to the appropriate label to handle
// smaller widths.
"cmp w5, #2\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v22.4s}, [x10]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"subs w5, w5, #2\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"cmp w5, #3\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"mov x12, x11\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"add x13, x11, %[input_row_size]\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"add x14, x13, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"st1 {v23.8b}, [x7], x3\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [x6], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x7], x3\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x12, %[input_ptr]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w5, #2\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"ld1 {v22.4s}, [x10]\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
// Load inputs for 3x4 input window which corresponds to a 1x2 output
// window.
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"subs w5, w5, #2\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"cmp w5, #3\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"add %[input_ptr], %[input_ptr], %[input_width_increment]\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"mov x12, %[input_ptr]\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"add x14, x13, %[input_row_size]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v26.8h, v9.8b\n"
"st1 {v21.8b}, [%[output_ptr]], x3\n"
"uaddw v10.8h, v26.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [%[output_ptr]], x3\n"
"uaddw v11.8h, v26.8h, v11.8b\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"uaddw v13.8h, v26.8h, v13.8b\n"
"uaddw v14.8h, v26.8h, v14.8b\n"
"uaddw v15.8h, v26.8h, v15.8b\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v17.8h, v26.8h, v17.8b\n"
"uaddw v18.8h, v26.8h, v18.8b\n"
"uaddw v19.8h, v26.8h, v19.8b\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"uaddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"uaddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"uaddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"st1 {v21.8b}, [%[output_ptr]], x3\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [%[output_ptr]], x3\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"umax v21.8b, v21.8b, v30.8b\n"
"umin v21.8b, v21.8b, v31.8b\n"
"st1 {v21.8b}, [%[output_ptr]]\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height)
:
// Inputs.
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
template <>
struct DepthwiseConvWindow<DepthwiseConvOutputRounding::kAwayFromZero, 8, 2,
2> {
static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
const int32* bias_ptr, uint8* output_ptr,
int64_t input_depth, int64_t input_row_size,
int32 output_window_height, int32 output_window_width,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 4 * input_depth;
const int64_t input_height_increment = 4 * input_row_size;
const int64_t output_height_increment = 2 * params_ptr->output_row_size;
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time at stride 2, a 5x5 input
// window is required. To avoid register exhaustion, we load
// the first 2 rows of the 5x5 input window into registers
// v9--v18, and use the same registers to load the next 2
// rows, and finally v9--v13 to load the last row.
// Accumulators for all 2x2 outputs are reserved by registers
// v21-v22 (top left output), v23-v24 (top right output),
// v19-v20 (bottom left output), v25-v26 (bottom right
// output).
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time at stride 2, load inputs for
// a 1x2 (1 height, 2 width) output window (3x5 input
// window). Registers v9--v24 hold input values. Mul-add with
// accumulators v24--v27.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
"ldr w0, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"cmp %w[output_window_height], #2\n"
"dup v28.8h, w0\n"
"ldr w1, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
"dup v26.4s, w9\n"
"ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
"dup v27.4s, w1\n"
"ldr w3, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
"dup v29.8h, w2\n"
"ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v30.16b, w3\n"
"ldr x5, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"dup v31.16b, w4\n"
"ldr x19, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"ldr w20, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
// Load filters and add offsets.
"add x10, %[bias_ptr], #16\n"
"ld1 {v0.8b}, [%[filter_ptr]], x5\n"
"dup v9.8h, w20\n"
"ld1 {v1.8b}, [%[filter_ptr]], x5\n"
"uaddw v0.8h, v9.8h, v0.8b\n"
"ld1 {v2.8b}, [%[filter_ptr]], x5\n"
"uaddw v1.8h, v9.8h, v1.8b\n"
"ld1 {v3.8b}, [%[filter_ptr]], x5\n"
"uaddw v2.8h, v9.8h, v2.8b\n"
"ld1 {v4.8b}, [%[filter_ptr]], x5\n"
"uaddw v3.8h, v9.8h, v3.8b\n"
"ld1 {v5.8b}, [%[filter_ptr]], x5\n"
"uaddw v4.8h, v9.8h, v4.8b\n"
"ld1 {v6.8b}, [%[filter_ptr]], x5\n"
"uaddw v5.8h, v9.8h, v5.8b\n"
"ld1 {v7.8b}, [%[filter_ptr]], x5\n"
"uaddw v6.8h, v9.8h, v6.8b\n"
"ld1 {v8.8b}, [%[filter_ptr]]\n"
"uaddw v7.8h, v9.8h, v7.8b\n"
"uaddw v8.8h, v9.8h, v8.8b\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// Load the first two rows of the 5x5 input window, then reuse the
// same registers to load subsequent rows as they become available.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 1 output horizontally in
// anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
// load, otherwise jump to the appropriate label to handle smaller
// widths.
"cmp w14, #2\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x19\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"ld1 {v22.4s}, [x10]\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"ld1 {v19.4s}, [%[bias_ptr]]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v20.4s}, [x10]\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v25.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"ld1 {v26.4s}, [x10]\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"subs w14, w14, #2\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"cmp w14, #3\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"mov x12, x11\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"add x15, x13, %[input_row_size]\n"
"dup v28.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v27.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v27.4s, v27.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v27.4s\n"
"dup v27.4s, w1\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x6], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
"dup v28.4s, w9\n"
"sqrdmulh v19.4s, v19.4s, v27.4s\n"
"sqrdmulh v20.4s, v20.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"sqrdmulh v26.4s, v26.4s, v27.4s\n"
"and v27.16b, v19.16b, v28.16b\n"
"and v29.16b, v20.16b, v28.16b\n"
"and v30.16b, v25.16b, v28.16b\n"
"and v31.16b, v26.16b, v28.16b\n"
"sshr v27.4s, v27.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v19.4s, v19.4s, v27.4s\n"
"dup v27.4s, w1\n"
"sqadd v20.4s, v20.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v25.4s, v25.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v26.4s, v26.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v19.4s, v19.4s, v28.4s\n"
"srshl v20.4s, v20.4s, v28.4s\n"
"srshl v25.4s, v25.4s, v28.4s\n"
"srshl v26.4s, v26.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v19.4h, v19.4s\n"
"sqxtn2 v19.8h, v20.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v19.8h, v19.8h, v29.8h\n"
"sqadd v25.8h, v25.8h, v29.8h\n"
"sqxtun v19.8b, v19.8h\n"
"sqxtun2 v19.16b, v25.8h\n"
"ld1 {v20.4s}, [x10]\n"
"umax v19.16b, v19.16b, v30.16b\n"
"umin v19.16b, v19.16b, v31.16b\n"
"ld1 {v26.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v19.8b}, [x7], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v25.d[0], v19.d[1]\n"
"st1 {v25.8b}, [x7], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v19.4s}, [%[bias_ptr]]\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v25.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"dup v28.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v27.16b, v21.16b, v28.16b\n"
"and v29.16b, v22.16b, v28.16b\n"
"and v30.16b, v23.16b, v28.16b\n"
"and v31.16b, v24.16b, v28.16b\n"
"sshr v27.4s, v27.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v21.4s, v21.4s, v27.4s\n"
"dup v27.4s, w1\n"
"sqadd v22.4s, v22.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v23.4s, v23.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v24.4s, v24.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v21.4s, v21.4s, v28.4s\n"
"srshl v22.4s, v22.4s, v28.4s\n"
"srshl v23.4s, v23.4s, v28.4s\n"
"srshl v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x6]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
"dup v28.4s, w9\n"
"sqrdmulh v19.4s, v19.4s, v27.4s\n"
"sqrdmulh v20.4s, v20.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"sqrdmulh v26.4s, v26.4s, v27.4s\n"
"and v27.16b, v19.16b, v28.16b\n"
"and v29.16b, v20.16b, v28.16b\n"
"and v30.16b, v25.16b, v28.16b\n"
"and v31.16b, v26.16b, v28.16b\n"
"sshr v27.4s, v27.4s, #31\n"
"sshr v29.4s, v29.4s, #31\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v19.4s, v19.4s, v27.4s\n"
"dup v27.4s, w1\n"
"sqadd v20.4s, v20.4s, v29.4s\n"
"dup v29.8h, w2\n"
"sqadd v25.4s, v25.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v26.4s, v26.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v19.4s, v19.4s, v28.4s\n"
"srshl v20.4s, v20.4s, v28.4s\n"
"srshl v25.4s, v25.4s, v28.4s\n"
"srshl v26.4s, v26.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v19.4h, v19.4s\n"
"sqxtn2 v19.8h, v20.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v19.8h, v19.8h, v29.8h\n"
"sqadd v25.8h, v25.8h, v29.8h\n"
"sqxtun v19.8b, v19.8h\n"
"sqxtun2 v19.16b, v25.8h\n"
"umax v19.16b, v19.16b, v30.16b\n"
"umin v19.16b, v19.16b, v31.16b\n"
"st1 {v19.8b}, [x7], x5\n"
"mov v25.d[0], v19.d[1]\n"
"st1 {v25.8b}, [x7]\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
// Handle last column if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
// Registers v9, v10, v11, v14, v15, and v16 have already been loaded
// with the correct values at this point. This corresponds to the
// first two input rows of the top left output. Now load the last
// input row for this output. Once these inputs are no longer needed,
// load the input rows for the bottom left output.
"add x12, x15, %[input_row_size]\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v13.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v17.8b}, [x15]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v21.4s, v6.4h, v12.4h\n"
"smlal2 v22.4s, v6.8h, v12.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v7.4h, v13.4h\n"
"smlal2 v22.4s, v7.8h, v13.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v2.4h, v17.4h\n"
"smlal2 v24.4s, v2.8h, v17.8h\n"
"dup v26.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"and v18.16b, v21.16b, v26.16b\n"
"and v19.16b, v22.16b, v26.16b\n"
"sshr v18.4s, v18.4s, #31\n"
"sshr v19.4s, v19.4s, #31\n"
"sqadd v21.4s, v21.4s, v18.4s\n"
"sqadd v22.4s, v22.4s, v19.4s\n"
"srshl v21.4s, v21.4s, v26.4s\n"
"srshl v22.4s, v22.4s, v26.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"umax v21.8b, v21.8b, v30.8b\n"
"umin v21.8b, v21.8b, v31.8b\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal v23.4s, v3.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v24.4s, v3.8h, v9.8h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal v23.4s, v4.4h, v10.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v24.4s, v4.8h, v10.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v23.4s, v5.4h, v11.4h\n"
"smlal2 v24.4s, v5.8h, v11.8h\n"
"smlal v23.4s, v6.4h, v14.4h\n"
"smlal2 v24.4s, v6.8h, v14.8h\n"
"smlal v23.4s, v7.4h, v15.4h\n"
"smlal2 v24.4s, v7.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v16.4h\n"
"smlal2 v24.4s, v8.8h, v16.8h\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"and v18.16b, v23.16b, v26.16b\n"
"and v19.16b, v24.16b, v26.16b\n"
"sshr v18.4s, v18.4s, #31\n"
"sshr v19.4s, v19.4s, #31\n"
"sqadd v23.4s, v23.4s, v18.4s\n"
"sqadd v24.4s, v24.4s, v19.4s\n"
"srshl v23.4s, v23.4s, v26.4s\n"
"srshl v24.4s, v24.4s, v26.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v23.8b, v23.8h\n"
"umax v23.8b, v23.8b, v30.8b\n"
"umin v23.8b, v23.8b, v31.8b\n"
"st1 {v23.8b}, [x7]\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w14, #2\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"ld1 {v24.4s}, [%[bias_ptr]]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"ld1 {v25.4s}, [x10]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v26.4s}, [%[bias_ptr]]\n"
"ld1 {v27.4s}, [x10]\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"subs w14, w14, #2\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"cmp w14, #3\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"mov x12, x11\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"add x13, x12, %[input_row_size]\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"add x15, x13, %[input_row_size]\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"uaddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"uaddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"uaddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"uaddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"uaddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"dup v28.4s, w1\n"
"dup v29.4s, w9\n"
"sqrdmulh v24.4s, v24.4s, v28.4s\n"
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
"dup v28.8h, w2\n"
"and v30.16b, v24.16b, v29.16b\n"
"and v31.16b, v25.16b, v29.16b\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v24.4s, v24.4s, v30.4s\n"
"sqadd v25.4s, v25.4s, v31.4s\n"
"and v30.16b, v26.16b, v29.16b\n"
"and v31.16b, v27.16b, v29.16b\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v26.4s, v26.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v27.4s, v27.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v24.4s, v24.4s, v29.4s\n"
"srshl v25.4s, v25.4s, v29.4s\n"
"srshl v26.4s, v26.4s, v29.4s\n"
"srshl v27.4s, v27.4s, v29.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqxtn v26.4h, v26.4s\n"
"sqxtn2 v26.8h, v27.4s\n"
"sqadd v24.8h, v24.8h, v28.8h\n"
"sqadd v26.8h, v26.8h, v28.8h\n"
"sqxtun v24.8b, v24.8h\n"
"sqxtun2 v24.16b, v26.8h\n"
"dup v28.8h, w0\n"
"ld1 {v25.4s}, [x10]\n"
"umax v24.16b, v24.16b, v30.16b\n"
"umin v24.16b, v24.16b, v31.16b\n"
"ld1 {v27.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v24.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v26.d[0], v24.d[1]\n"
"st1 {v26.8b}, [x6], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v24.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"ld1 {v26.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"uaddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"uaddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"uaddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"uaddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"uaddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"dup v28.4s, w1\n"
"dup v29.4s, w9\n"
"sqrdmulh v24.4s, v24.4s, v28.4s\n"
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
"dup v28.8h, w2\n"
"and v30.16b, v24.16b, v29.16b\n"
"and v31.16b, v25.16b, v29.16b\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v24.4s, v24.4s, v30.4s\n"
"sqadd v25.4s, v25.4s, v31.4s\n"
"and v30.16b, v26.16b, v29.16b\n"
"and v31.16b, v27.16b, v29.16b\n"
"sshr v30.4s, v30.4s, #31\n"
"sshr v31.4s, v31.4s, #31\n"
"sqadd v26.4s, v26.4s, v30.4s\n"
"dup v30.16b, w3\n"
"sqadd v27.4s, v27.4s, v31.4s\n"
"dup v31.16b, w4\n"
"srshl v24.4s, v24.4s, v29.4s\n"
"srshl v25.4s, v25.4s, v29.4s\n"
"srshl v26.4s, v26.4s, v29.4s\n"
"srshl v27.4s, v27.4s, v29.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqxtn v26.4h, v26.4s\n"
"sqxtn2 v26.8h, v27.4s\n"
"sqadd v24.8h, v24.8h, v28.8h\n"
"sqadd v26.8h, v26.8h, v28.8h\n"
"sqxtun v24.8b, v24.8h\n"
"sqxtun2 v24.16b, v26.8h\n"
"dup v28.8h, w0\n"
"umax v24.16b, v24.16b, v30.16b\n"
"umin v24.16b, v24.16b, v31.16b\n"
"st1 {v24.8b}, [x6], x5\n"
"mov v26.d[0], v24.d[1]\n"
"st1 {v26.8b}, [x6]\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"dup v26.4s, w9\n"
"dup v27.4s, w1\n"
"dup v29.8h, w2\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"and v18.16b, v24.16b, v26.16b\n"
"and v19.16b, v25.16b, v26.16b\n"
"sshr v18.4s, v18.4s, #31\n"
"sshr v19.4s, v19.4s, #31\n"
"sqadd v24.4s, v24.4s, v18.4s\n"
"sqadd v25.4s, v25.4s, v19.4s\n"
"srshl v24.4s, v24.4s, v26.4s\n"
"srshl v25.4s, v25.4s, v26.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqadd v24.8h, v24.8h, v29.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v30.8b\n"
"umin v24.8b, v24.8b, v31.8b\n"
"st1 {v24.8b}, [x6]\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height)
:
// Inputs.
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x19", "x20");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
template <>
struct DepthwiseConvWindow<DepthwiseConvOutputRounding::kUpward, 8, 2, 2> {
static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
const int32* bias_ptr, uint8* output_ptr,
int64_t input_depth, int64_t input_row_size,
int32 output_window_height, int32 output_window_width,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 4 * input_depth;
const int64_t input_height_increment = 4 * input_row_size;
const int64_t output_height_increment = 2 * params_ptr->output_row_size;
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time at stride 2, a 5x5 input
// window is required. To avoid register exhaustion, we load
// the first 2 rows of the 5x5 input window into registers
// v9--v18, and use the same registers to load the next 2
// rows, and finally v9--v13 to load the last row.
// Accumulators for all 2x2 outputs are reserved by registers
// v21-v22 (top left output), v23-v24 (top right output),
// v19-v20 (bottom left output), v25-v26 (bottom right
// output).
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time at stride 2, load inputs for
// a 1x2 (1 height, 2 width) output window (3x5 input
// window). Registers v9--v24 hold input values. Mul-add with
// accumulators v24--v27.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
"ldr w0, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"cmp %w[output_window_height], #2\n"
"dup v28.8h, w0\n"
"ldr w1, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
"dup v26.4s, w9\n"
"ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
"dup v27.4s, w1\n"
"ldr w3, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
"dup v29.8h, w2\n"
"ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v30.16b, w3\n"
"ldr x5, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"dup v31.16b, w4\n"
"ldr x19, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"ldr w20, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
// Load filters and add offsets.
"add x10, %[bias_ptr], #16\n"
"ld1 {v0.8b}, [%[filter_ptr]], x5\n"
"dup v9.8h, w20\n"
"ld1 {v1.8b}, [%[filter_ptr]], x5\n"
"uaddw v0.8h, v9.8h, v0.8b\n"
"ld1 {v2.8b}, [%[filter_ptr]], x5\n"
"uaddw v1.8h, v9.8h, v1.8b\n"
"ld1 {v3.8b}, [%[filter_ptr]], x5\n"
"uaddw v2.8h, v9.8h, v2.8b\n"
"ld1 {v4.8b}, [%[filter_ptr]], x5\n"
"uaddw v3.8h, v9.8h, v3.8b\n"
"ld1 {v5.8b}, [%[filter_ptr]], x5\n"
"uaddw v4.8h, v9.8h, v4.8b\n"
"ld1 {v6.8b}, [%[filter_ptr]], x5\n"
"uaddw v5.8h, v9.8h, v5.8b\n"
"ld1 {v7.8b}, [%[filter_ptr]], x5\n"
"uaddw v6.8h, v9.8h, v6.8b\n"
"ld1 {v8.8b}, [%[filter_ptr]]\n"
"uaddw v7.8h, v9.8h, v7.8b\n"
"uaddw v8.8h, v9.8h, v8.8b\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// Load the first two rows of the 5x5 input window, then reuse the
// same registers to load subsequent rows as they become available.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 1 output horizontally in
// anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
// load, otherwise jump to the appropriate label to handle smaller
// widths.
"cmp w14, #2\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x19\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"ld1 {v22.4s}, [x10]\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"ld1 {v19.4s}, [%[bias_ptr]]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v20.4s}, [x10]\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v25.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"ld1 {v26.4s}, [x10]\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"subs w14, w14, #2\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"cmp w14, #3\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"mov x12, x11\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"add x15, x13, %[input_row_size]\n"
"dup v28.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x6], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"ld1 {v21.4s}, [%[bias_ptr]]\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"ld1 {v23.4s}, [%[bias_ptr]]\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
"dup v28.4s, w9\n"
"sqrdmulh v19.4s, v19.4s, v27.4s\n"
"sqrdmulh v20.4s, v20.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"sqrdmulh v26.4s, v26.4s, v27.4s\n"
"sqrshl v19.4s, v19.4s, v28.4s\n"
"sqrshl v20.4s, v20.4s, v28.4s\n"
"sqrshl v25.4s, v25.4s, v28.4s\n"
"sqrshl v26.4s, v26.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v19.4h, v19.4s\n"
"sqxtn2 v19.8h, v20.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v19.8h, v19.8h, v29.8h\n"
"sqadd v25.8h, v25.8h, v29.8h\n"
"sqxtun v19.8b, v19.8h\n"
"sqxtun2 v19.16b, v25.8h\n"
"ld1 {v20.4s}, [x10]\n"
"umax v19.16b, v19.16b, v30.16b\n"
"umin v19.16b, v19.16b, v31.16b\n"
"ld1 {v26.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v19.8b}, [x7], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v25.d[0], v19.d[1]\n"
"st1 {v25.8b}, [x7], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v19.4s}, [%[bias_ptr]]\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v25.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"dup v28.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v28.4s\n"
"sqrshl v22.4s, v22.4s, v28.4s\n"
"sqrshl v23.4s, v23.4s, v28.4s\n"
"sqrshl v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"sqxtun2 v21.16b, v23.8h\n"
"ld1 {v22.4s}, [x10]\n"
"umax v21.16b, v21.16b, v30.16b\n"
"umin v21.16b, v21.16b, v31.16b\n"
"ld1 {v24.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v23.d[0], v21.d[1]\n"
"st1 {v23.8b}, [x6]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
"dup v28.4s, w9\n"
"sqrdmulh v19.4s, v19.4s, v27.4s\n"
"sqrdmulh v20.4s, v20.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"sqrdmulh v26.4s, v26.4s, v27.4s\n"
"sqrshl v19.4s, v19.4s, v28.4s\n"
"sqrshl v20.4s, v20.4s, v28.4s\n"
"sqrshl v25.4s, v25.4s, v28.4s\n"
"sqrshl v26.4s, v26.4s, v28.4s\n"
"dup v28.8h, w0\n"
"sqxtn v19.4h, v19.4s\n"
"sqxtn2 v19.8h, v20.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v19.8h, v19.8h, v29.8h\n"
"sqadd v25.8h, v25.8h, v29.8h\n"
"sqxtun v19.8b, v19.8h\n"
"sqxtun2 v19.16b, v25.8h\n"
"umax v19.16b, v19.16b, v30.16b\n"
"umin v19.16b, v19.16b, v31.16b\n"
"st1 {v19.8b}, [x7], x5\n"
"mov v25.d[0], v19.d[1]\n"
"st1 {v25.8b}, [x7]\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
// Handle last column if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
// Registers v9, v10, v11, v14, v15, and v16 have already been loaded
// with the correct values at this point. This corresponds to the
// first two input rows of the top left output. Now load the last
// input row for this output. Once these inputs are no longer needed,
// load the input rows for the bottom left output.
"add x12, x15, %[input_row_size]\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v13.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v17.8b}, [x15]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v21.4s, v6.4h, v12.4h\n"
"smlal2 v22.4s, v6.8h, v12.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v7.4h, v13.4h\n"
"smlal2 v22.4s, v7.8h, v13.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v2.4h, v17.4h\n"
"smlal2 v24.4s, v2.8h, v17.8h\n"
"dup v26.4s, w9\n"
"sqrdmulh v21.4s, v21.4s, v27.4s\n"
"sqrdmulh v22.4s, v22.4s, v27.4s\n"
"sqrshl v21.4s, v21.4s, v26.4s\n"
"sqrshl v22.4s, v22.4s, v26.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqadd v21.8h, v21.8h, v29.8h\n"
"sqxtun v21.8b, v21.8h\n"
"umax v21.8b, v21.8b, v30.8b\n"
"umin v21.8b, v21.8b, v31.8b\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v21.8b}, [x6]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"smlal v23.4s, v3.4h, v9.4h\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v24.4s, v3.8h, v9.8h\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"smlal v23.4s, v4.4h, v10.4h\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v24.4s, v4.8h, v10.8h\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"smlal v23.4s, v5.4h, v11.4h\n"
"smlal2 v24.4s, v5.8h, v11.8h\n"
"smlal v23.4s, v6.4h, v14.4h\n"
"smlal2 v24.4s, v6.8h, v14.8h\n"
"smlal v23.4s, v7.4h, v15.4h\n"
"smlal2 v24.4s, v7.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v16.4h\n"
"smlal2 v24.4s, v8.8h, v16.8h\n"
"sqrdmulh v23.4s, v23.4s, v27.4s\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrshl v23.4s, v23.4s, v26.4s\n"
"sqrshl v24.4s, v24.4s, v26.4s\n"
"sqxtn v23.4h, v23.4s\n"
"sqxtn2 v23.8h, v24.4s\n"
"sqadd v23.8h, v23.8h, v29.8h\n"
"sqxtun v23.8b, v23.8h\n"
"umax v23.8b, v23.8b, v30.8b\n"
"umin v23.8b, v23.8b, v31.8b\n"
"st1 {v23.8b}, [x7]\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w14, #2\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"ld1 {v24.4s}, [%[bias_ptr]]\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"ld1 {v25.4s}, [x10]\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"ld1 {v26.4s}, [%[bias_ptr]]\n"
"ld1 {v27.4s}, [x10]\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"subs w14, w14, #2\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"cmp w14, #3\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"mov x12, x11\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"add x13, x12, %[input_row_size]\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"add x15, x13, %[input_row_size]\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"uaddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"uaddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"uaddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"uaddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"uaddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"dup v28.4s, w1\n"
"dup v29.4s, w9\n"
"sqrdmulh v24.4s, v24.4s, v28.4s\n"
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
"dup v28.8h, w2\n"
"sqrshl v24.4s, v24.4s, v29.4s\n"
"sqrshl v25.4s, v25.4s, v29.4s\n"
"sqrshl v26.4s, v26.4s, v29.4s\n"
"sqrshl v27.4s, v27.4s, v29.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqxtn v26.4h, v26.4s\n"
"sqxtn2 v26.8h, v27.4s\n"
"sqadd v24.8h, v24.8h, v28.8h\n"
"sqadd v26.8h, v26.8h, v28.8h\n"
"sqxtun v24.8b, v24.8h\n"
"sqxtun2 v24.16b, v26.8h\n"
"dup v28.8h, w0\n"
"ld1 {v25.4s}, [x10]\n"
"umax v24.16b, v24.16b, v30.16b\n"
"umin v24.16b, v24.16b, v31.16b\n"
"ld1 {v27.4s}, [x10]\n"
"uaddw v9.8h, v28.8h, v9.8b\n"
"st1 {v24.8b}, [x6], x5\n"
"uaddw v10.8h, v28.8h, v10.8b\n"
"mov v26.d[0], v24.d[1]\n"
"st1 {v26.8b}, [x6], x5\n"
"uaddw v11.8h, v28.8h, v11.8b\n"
"uaddw v12.8h, v28.8h, v12.8b\n"
"uaddw v13.8h, v28.8h, v13.8b\n"
"uaddw v14.8h, v28.8h, v14.8b\n"
"ld1 {v24.4s}, [%[bias_ptr]]\n"
"uaddw v15.8h, v28.8h, v15.8b\n"
"ld1 {v26.4s}, [%[bias_ptr]]\n"
"uaddw v16.8h, v28.8h, v16.8b\n"
"uaddw v17.8h, v28.8h, v17.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"uaddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"uaddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"uaddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"uaddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"uaddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"uaddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"dup v28.4s, w1\n"
"dup v29.4s, w9\n"
"sqrdmulh v24.4s, v24.4s, v28.4s\n"
"sqrdmulh v25.4s, v25.4s, v28.4s\n"
"sqrdmulh v26.4s, v26.4s, v28.4s\n"
"sqrdmulh v27.4s, v27.4s, v28.4s\n"
"dup v28.8h, w2\n"
"sqrshl v24.4s, v24.4s, v29.4s\n"
"sqrshl v25.4s, v25.4s, v29.4s\n"
"sqrshl v26.4s, v26.4s, v29.4s\n"
"sqrshl v27.4s, v27.4s, v29.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqxtn v26.4h, v26.4s\n"
"sqxtn2 v26.8h, v27.4s\n"
"sqadd v24.8h, v24.8h, v28.8h\n"
"sqadd v26.8h, v26.8h, v28.8h\n"
"sqxtun v24.8b, v24.8h\n"
"sqxtun2 v24.16b, v26.8h\n"
"dup v28.8h, w0\n"
"umax v24.16b, v24.16b, v30.16b\n"
"umin v24.16b, v24.16b, v31.16b\n"
"st1 {v24.8b}, [x6], x5\n"
"mov v26.d[0], v24.d[1]\n"
"st1 {v26.8b}, [x6]\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"dup v26.4s, w9\n"
"dup v27.4s, w1\n"
"dup v29.8h, w2\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"sqrdmulh v24.4s, v24.4s, v27.4s\n"
"sqrdmulh v25.4s, v25.4s, v27.4s\n"
"sqrshl v24.4s, v24.4s, v26.4s\n"
"sqrshl v25.4s, v25.4s, v26.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqadd v24.8h, v24.8h, v29.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v30.8b\n"
"umin v24.8b, v24.8b, v31.8b\n"
"st1 {v24.8b}, [x6]\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height)
:
// Inputs.
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x19", "x20");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kAwayFromZero,
                            EdgeType::kCenter, 1, 1> {
  // Computes the single "center" edge-case output of a 3x3 depthwise conv:
  // after padding, only a 1x1 input window contributes, so each output
  // channel is bias + one input*filter product, requantized with
  // round-half-away-from-zero semantics (see the and/sshr/sqadd/srshl
  // sequence below).
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 1x1 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the 1x1 input and filter values.
        //
        // Register roles established below:
        //   v8  - 8 input bytes;  v0 - 8 filter bytes (both widened to
        //         16 bits with their respective offsets via uaddw).
        //   v16/v17 - int32 accumulators (low/high 4 lanes), seeded with
        //         bias values.
        //   v25 - filter offset, v26 - input offset, v27 - output multiplier,
        //   v28 - output offset, v29 - output right shift (as shift operand),
        //   v30/v31 - output activation min/max.
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w10\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        // x11 tracks remaining depth. Each loop iteration consumes 8
        // channels; we stay in the loop only while >= 16 remain so the final
        // 8 channels are always handled by the after-loop tail.
        "cmp x11, #16\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w10\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.16b, w10\n"
        "dup v25.8h, w9\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Multiply-accumulate 8 channels into the bias-seeded accumulators,
        // then requantize: saturating doubling multiply (sqrdmulh) by the
        // output multiplier, then a rounding right shift. The
        // and/sshr(#31)/sqadd fixup nudges negative accumulators before the
        // rounding shift so that ties round away from zero instead of the
        // shift's default round-half-up (gemmlowp-style RoundingDivideByPOT).
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x11, x11, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x11, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        // Narrow 32->16, add output offset, narrow 16->8 with unsigned
        // saturation, then clamp to the activation range and store.
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        // Prepare the next iteration: re-widen the freshly loaded input and
        // filter bytes and reload bias into the accumulators.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"

        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Tail: identical multiply/requantize/clamp sequence for the final 8
        // channels, without pointer post-increments on the store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v8", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28",
        "v29", "v30", "v31",
        // We use these general-purpose registers.
        "x9", "x10", "x11");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kUpward,
                            EdgeType::kCenter, 1, 1> {
  // Same 1x1 "center" edge-case kernel as the kAwayFromZero specialization
  // above, but requantization uses a single saturating rounding shift
  // (sqrshl), i.e. ties round upward — no sign-based fixup is needed.
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 1x1 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the 1x1 input and filter values.
        //
        // Register roles: v8 input bytes, v0 filter bytes (both widened with
        // offsets), v16/v17 bias-seeded int32 accumulators, v25 filter
        // offset, v26 input offset, v27 output multiplier, v28 output
        // offset, v29 output right shift, v30/v31 activation min/max.
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w10\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        // Loop while >= 16 channels remain (8 consumed per iteration); the
        // last 8 channels always fall through to the after-loop tail.
        "cmp x11, #16\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w10\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.16b, w10\n"
        "dup v25.8h, w9\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Multiply-accumulate 8 channels, then requantize with
        // sqrdmulh + sqrshl (saturating rounding shift), narrow, add output
        // offset, clamp to the activation range, and store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x11, x11, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x11, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        // Re-widen the next input/filter bytes and reload bias accumulators.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"

        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Tail for the final 8 channels; same sequence, non-incrementing
        // store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers. Note v18/v19 are declared for
        // symmetry with the kAwayFromZero variant although this rounding
        // path does not touch them (over-declaring a clobber is harmless).
        "v0", "v8", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28",
        "v29", "v30", "v31",
        // We use these general-purpose registers.
        "x9", "x10", "x11");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kAwayFromZero,
                            EdgeType::kCorner, 1, 1> {
  // Computes a "corner" edge-case output of a 3x3 depthwise conv: padding
  // leaves a 2x2 input window, so each output channel accumulates four
  // input*filter products. Requantization rounds ties away from zero via
  // the and/sshr/sqadd/srshl fixup sequence.
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x2 input and
        // filter values.
        // Load input and filter values.
        //
        // Pointer layout for the 2x2 window (x15 = output depth):
        //   %[input_ptr] -> v8 (top-left),  x12 = input + depth -> v9
        //   (top-right), x13 = input + input_row_size -> v10 (bottom-left),
        //   x14 = x13 + depth -> v11 (bottom-right). Filter taps v0..v3 come
        //   from %[filter_ptr], +depth (x9), +filter_row_size (x10), and
        //   x10+depth (x11) respectively.
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr x9, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        // Loop while >= 16 channels remain; the last 8 channels fall
        // through to the after-loop tail.
        "cmp x15, #16\n"
        "add x12, %[input_ptr], x15\n"
        "add x13, %[input_ptr], x9\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "add x14, x13, x15\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "ldr x6, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x9, %[filter_ptr], x15\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "add x10, %[filter_ptr], x6\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "add x11, x10, x15\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Load constants.
        // v25 filter offset, v26 input offset, v27 output multiplier,
        // v28 output offset, v29 output right shift, v30/v31 activation
        // min/max.
        "ldr w6, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w7\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w7\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.16b, w7\n"
        "dup v25.8h, w6\n"
        // Add input and filter offsets.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the four window products per channel (loads for the
        // next iteration are interleaved with the MACs), then requantize.
        // The and/sshr(#31)/sqadd fixup before srshl makes ties round away
        // from zero (gemmlowp-style RoundingDivideByPOT).
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x15, x15, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x15, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        // Narrow, add output offset, saturate to uint8, clamp, store.
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        // Widen the freshly loaded bytes and reload bias accumulators for
        // the next 8 channels.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"

        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Tail for the final 8 channels: same four-product accumulation and
        // requantization, with a non-incrementing store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18",
        "v19", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
        // We use these general-purpose registers.
        "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Corner-edge kernel, kUpward rounding: convolves the 2x2 input window at a
// padded corner with the corresponding 2x2 sub-filter across the full depth,
// producing one output pixel, 8 channels per iteration.
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kUpward,
                            EdgeType::kCorner, 1, 1> {
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x2 input and
        // filter values.

        // Load input and filter values.
        // x15 holds the remaining output depth (loop counter); x12/x13/x14
        // point at the other three elements of the 2x2 input window, and
        // x9/x10/x11 at the other three elements of the 2x2 filter window.
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr x9, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "cmp x15, #16\n"
        "add x12, %[input_ptr], x15\n"
        "add x13, %[input_ptr], x9\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "add x14, x13, x15\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "ldr x6, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x9, %[filter_ptr], x15\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "add x10, %[filter_ptr], x6\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "add x11, x10, x15\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Load constants.
        // Register map: v26 = input offset, v27 = output multiplier,
        // v28 = output offset, v29 = output shift, v30/v31 = activation
        // min/max, v25 = filter offset.
        "ldr w6, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w7\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w7\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.16b, w7\n"
        "dup v25.8h, w6\n"
        // Add input and filter offsets.
        // Widen the uint8 inputs/filters to int16 while folding in their
        // quantization offsets; v16/v17 start from the per-channel biases.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the four multiply-adds for 8 channels; loads for the
        // next depth step are interleaved to hide memory latency.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x15, x15, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x15, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Requantize (kUpward): saturating rounding-doubling multiply, then
        // saturating rounding right shift, narrow, add output offset, clamp.
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (leftover) 8 channels: same accumulate + requantize, but no
        // further loads or post-increment on the output store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18",
        "v19", "v25", "v26", "v27", "v28", "v29", "v30", "v31",
        // We use these general-purpose registers.
        "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Horizontal-edge kernel, kAwayFromZero rounding: convolves the 2x3 input
// window along a padded top/bottom edge with the corresponding 2x3 sub-filter
// across the full depth, producing one output pixel, 8 channels per iteration.
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kAwayFromZero,
                            EdgeType::kHorizontal, 1, 1> {
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x3 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x3 input and
        // filter values.

        // Load input and filter values.
        // x12/x13 walk the two input rows (stride x7 = input depth);
        // x9/x10 walk the two filter rows. x15 counts remaining depth.
        "ldr x7, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x9, %[filter_ptr]\n"
        "ldr x14, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "add x10, x9, x14\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "cmp x15, #16\n"
        "ld1 {v10.8b}, [x12]\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "ld1 {v13.8b}, [x13]\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "ld1 {v2.8b}, [x9]\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        // Register map: v26 = input offset, v27 = output multiplier,
        // v28 = output offset, v29 = output shift, v30/v31 = activation
        // min/max, v25 = filter offset.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.8b, w13\n"
        "dup v25.8h, w12\n"
        // Add input and filter offsets.
        // Widen uint8 inputs/filters to int16 with their quantization
        // offsets folded in; v16/v17 start from the per-channel biases.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the six multiply-adds for 8 channels; pointer resets
        // and loads for the next depth step are interleaved throughout.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x9, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x10, x9, x14\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12]\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9]\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        // Requantize (kAwayFromZero): rounding-doubling multiply, then a
        // sign-dependent correction (and/sshr/sqadd) before the rounding
        // shift so that negative values round away from zero.
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "ld1 {v5.8b}, [x10]\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (leftover) 8 channels: same accumulate + requantize, no
        // further loads.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12",
        "v13", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28", "v29",
        "v30", "v31",
        // We use these general-purpose registers.
        "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Horizontal-edge kernel, kUpward rounding: same 2x3 edge computation as the
// kAwayFromZero specialization above, but requantizes with a plain saturating
// rounding shift (sqrshl) instead of the sign-corrected srshl sequence.
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kUpward,
                            EdgeType::kHorizontal, 1, 1> {
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x3 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x3 input and
        // filter values.

        // Load input and filter values.
        // x12/x13 walk the two input rows (stride x7 = input depth);
        // x9/x10 walk the two filter rows. x15 counts remaining depth.
        "ldr x7, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x9, %[filter_ptr]\n"
        "ldr x14, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "add x10, x9, x14\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "cmp x15, #16\n"
        "ld1 {v10.8b}, [x12]\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "ld1 {v13.8b}, [x13]\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "ld1 {v2.8b}, [x9]\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        // Register map: v26 = input offset, v27 = output multiplier,
        // v28 = output offset, v29 = output shift, v30/v31 = activation
        // min/max, v25 = filter offset.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.8b, w13\n"
        "dup v25.8h, w12\n"
        // Add input and filter offsets.
        // Widen uint8 inputs/filters to int16 with their quantization
        // offsets folded in; v16/v17 start from the per-channel biases.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the six multiply-adds for 8 channels; pointer resets
        // and loads for the next depth step are interleaved throughout.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x9, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x10, x9, x14\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12]\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9]\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        // Requantize (kUpward): rounding-doubling multiply then saturating
        // rounding right shift, narrow, add output offset, clamp.
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (leftover) 8 channels: same accumulate + requantize, no
        // further loads.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12",
        "v13", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28", "v29",
        "v30", "v31",
        // We use these general-purpose registers.
        "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Vertical-edge kernel, kAwayFromZero rounding: convolves the 3x2 input
// window along a padded left/right edge with the corresponding 3x2 sub-filter
// across the full depth, producing one output pixel, 8 channels per iteration.
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kAwayFromZero,
                            EdgeType::kVertical, 1, 1> {
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 3x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 3x2 input and
        // filter values.

        // Load input and filter values.
        // x12/x13/x14 walk the three input rows (stride x6 = input depth);
        // x7/x9/x10 walk the three filter rows. x15 counts remaining depth.
        "ldr x6, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x7, %[filter_ptr]\n"
        "ldr x5, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "add x14, x13, x11\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "add x9, x7, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "cmp x15, #16\n"
        "add x10, x9, x5\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13]\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "ld1 {v13.8b}, [x14]\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "ld1 {v1.8b}, [x7]\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "ld1 {v3.8b}, [x9]\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        // Register map: v26 = input offset, v27 = output multiplier,
        // v28 = output offset, v29 = output shift, v30/v31 = activation
        // min/max, v25 = filter offset.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.8b, w13\n"
        "dup v25.8h, w12\n"
        // Add input and filter offsets.
        // Widen uint8 inputs/filters to int16 with their quantization
        // offsets folded in; v16/v17 start from the per-channel biases.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the six multiply-adds for 8 channels; pointer resets
        // and loads for the next depth step are interleaved throughout.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add x14, x13, x11\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x7, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x9, x7, x5\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "add x10, x9, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13]\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x7]\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x14]\n"
        // Requantize (kAwayFromZero): rounding-doubling multiply, then a
        // sign-dependent correction (and/sshr/sqadd) before the rounding
        // shift so that negative values round away from zero.
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "ld1 {v3.8b}, [x9]\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "ld1 {v5.8b}, [x10]\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (leftover) 8 channels: same accumulate + requantize, no
        // further loads.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "and v18.16b, v16.16b, v29.16b\n"
        "and v19.16b, v17.16b, v29.16b\n"
        "sshr v18.4s, v18.4s, #31\n"
        "sshr v19.4s, v19.4s, #31\n"
        "sqadd v16.4s, v16.4s, v18.4s\n"
        "sqadd v17.4s, v17.4s, v19.4s\n"
        "srshl v16.4s, v16.4s, v29.4s\n"
        "srshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        // TODO(b/129852264): Improve testing coverage.
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12",
        "v13", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28", "v29",
        "v30", "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Vertical-edge kernel, kUpward rounding: same 3x2 edge computation as the
// kAwayFromZero specialization above, but requantizes with a plain saturating
// rounding shift (sqrshl) instead of the sign-corrected srshl sequence.
template <>
struct DepthwiseConvPartial<DepthwiseConvOutputRounding::kUpward,
                            EdgeType::kVertical, 1, 1> {
  static inline void Run(const uint8* input_ptr, const uint8* filter_ptr,
                         const int32* bias_ptr, uint8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 3x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 3x2 input and
        // filter values.

        // Load input and filter values.
        // x12/x13/x14 walk the three input rows (stride x6 = input depth);
        // x7/x9/x10 walk the three filter rows. x15 counts remaining depth.
        "ldr x6, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x7, %[filter_ptr]\n"
        "ldr x5, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "add x14, x13, x11\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "add x9, x7, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "cmp x15, #16\n"
        "add x10, x9, x5\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13]\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "ld1 {v13.8b}, [x14]\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "ld1 {v1.8b}, [x7]\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "ld1 {v3.8b}, [x9]\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        // Register map: v26 = input offset, v27 = output multiplier,
        // v28 = output offset, v29 = output shift, v30/v31 = activation
        // min/max, v25 = filter offset.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_MULTIPLIER) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v27.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_RIGHT_SHIFT) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.4s, w13\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FILTER_OFFSET) "]\n"
        "dup v31.8b, w13\n"
        "dup v25.8h, w12\n"
        // Add input and filter offsets.
        // Widen uint8 inputs/filters to int16 with their quantization
        // offsets folded in; v16/v17 start from the per-channel biases.
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the six multiply-adds for 8 channels; pointer resets
        // and loads for the next depth step are interleaved throughout.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add x14, x13, x11\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x7, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x9, x7, x5\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "add x10, x9, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13]\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x7]\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x14]\n"
        // Requantize (kUpward): rounding-doubling multiply then saturating
        // rounding right shift, narrow, add output offset, clamp.
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "ld1 {v3.8b}, [x9]\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "uaddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "uaddw v9.8h, v26.8h, v9.8b\n"
        "uaddw v10.8h, v26.8h, v10.8b\n"
        "uaddw v11.8h, v26.8h, v11.8b\n"
        "uaddw v12.8h, v26.8h, v12.8b\n"
        "uaddw v13.8h, v26.8h, v13.8b\n"
        "uaddw v0.8h, v25.8h, v0.8b\n"
        "uaddw v1.8h, v25.8h, v1.8b\n"
        "uaddw v2.8h, v25.8h, v2.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "uaddw v3.8h, v25.8h, v3.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "uaddw v4.8h, v25.8h, v4.8b\n"
        "uaddw v5.8h, v25.8h, v5.8b\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (leftover) 8 channels: same accumulate + requantize, no
        // further loads.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v27.4s\n"
        "sqrdmulh v17.4s, v17.4s, v27.4s\n"
        "sqrshl v16.4s, v16.4s, v29.4s\n"
        "sqrshl v17.4s, v17.4s, v29.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtun v16.8b, v16.8h\n"
        // TODO(b/129852264): Improve testing coverage.
        "umax v16.8b, v16.8b, v30.8b\n"
        "umin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v8", "v9", "v10", "v11", "v12",
        "v13", "v16", "v17", "v18", "v19", "v25", "v26", "v27", "v28", "v29",
        "v30", "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
#undef OFFSET_INPUT_DEPTH
#undef OFFSET_INPUT_ROW_SIZE
#undef OFFSET_OUTPUT_DEPTH
#undef OFFSET_OUTPUT_ROW_SIZE
#undef OFFSET_INPUT_OFFSET
#undef OFFSET_OUTPUT_OFFSET
#undef OFFSET_FILTER_OFFSET
#undef OFFSET_OUTPUT_MULTIPLIER
#undef OFFSET_OUTPUT_ACTIVATION_MIN
#undef OFFSET_OUTPUT_ACTIVATION_MAX
#undef OFFSET_OUTPUT_RIGHT_SHIFT
#undef OFFSET_INPUT_WIDTH
#undef OFFSET_INPUT_HEIGHT
#undef OFFSET_OUTPUT_WIDTH
#undef OFFSET_OUTPUT_HEIGHT
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvThroughDepth {
  // Walks the depth dimension from |start_depth| to |end_depth| in chunks of
  // eight channels, invoking the depth-8 DepthwiseConvWindow kernel for each
  // chunk. Deliberately not inlined to keep binary size small; read-only
  // parameters travel in a DepthwiseConvParams struct to minimize call
  // overhead.
  static void __attribute__((noinline))
  Run(const uint8* input_ptr, const uint8* filter_ptr, const int32* bias_ptr,
      uint8* output_ptr, int64_t start_depth, int64_t end_depth,
      int64_t input_depth, int64_t input_row_size, int32 output_window_height,
      int32 output_window_width, const DepthwiseConvParams& params) {
    while (start_depth <= end_depth - 8) {
      // Each kernel invocation consumes 8 channels of input, filter, bias
      // and output.
      DepthwiseConvWindow<output_rounding, 8, kStrideWidth, kStrideHeight>::Run(
          input_ptr, filter_ptr, bias_ptr, output_ptr, input_depth,
          input_row_size, output_window_height, output_window_width, &params);
      start_depth += 8;
      input_ptr += 8;
      output_ptr += 8;
      filter_ptr += 8;
      bias_ptr += 8;
    }
  }
};
// Runs a multi-row span of output pixels from |start_x| to |end_x|. For
// sufficiently deep/wide inputs it prefetches and shuffles 64-channel slices
// of the input into |shuffle_workspace| before convolving, to improve cache
// locality; otherwise it falls through to the plain per-depth kernel.
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvMultiRow {
  using ConvKernel =
      DepthwiseConvThroughDepth<output_rounding, kStrideWidth, kStrideHeight>;
  static inline void Run(const uint8* input_data, int32 start_x, int32 end_x,
                         const uint8* filter_data, const int32* bias_data,
                         uint8* output_data, const DepthwiseConvParams& params,
                         const ShuffleParams& shuffle_params,
                         uint8* shuffle_workspace) {
    // Sanity-check that |shuffle_params| is internally consistent and that a
    // 64-channel shuffled slice fits in the scratch workspace.
    TFLITE_DCHECK(
        shuffle_params.input_height ==
        get_shuffle_input_size(kStrideHeight, shuffle_params.output_height))
    TFLITE_DCHECK(
        shuffle_params.input_width ==
        get_shuffle_input_size(kStrideWidth, shuffle_params.output_width));
    TFLITE_DCHECK_LE(
        64 * shuffle_params.input_width * shuffle_params.input_height,
        kDepthwiseConvScratchWorkspaceSize);
    int32 out_x = start_x;
    // Run shuffling on inputs with sufficiently large depth and width. When
    // these parameters are large enough, more time is taken to load inputs
    // from memory. At this point, it becomes useful to prefetch and
    // preshuffle the input data to maximize locality.
    if (params.output_depth > 64 ||
        (params.output_depth <= 64 && params.input_width > 150)) {
      for (; out_x <= (end_x - shuffle_params.output_width);
           out_x += shuffle_params.output_width) {
        const uint8* input_ptr = input_data;
        const int32* bias_ptr = bias_data;
        const uint8* filter_ptr = filter_data;
        uint8* output_ptr = output_data;
        int64_t depth = 0;
        const int64_t shuffle_row_size = 64 * shuffle_params.input_width;
        // Process full 64-channel slices through the shuffle workspace.
        for (; depth <= params.output_depth - 64; depth += 64) {
          // Preload.
          const uint8* h_ptr = input_ptr;
          for (int32 i = 0; i < shuffle_params.input_height; i++) {
            const uint8* ptr = h_ptr;
            for (int32 j = 0; j < shuffle_params.input_width; j++) {
              asm volatile("prfm pldl1keep, [%[ptr]]\n" ::[ptr] "r"(ptr) :);
              ptr += params.input_depth;
            }
            h_ptr += params.input_row_size;
          }
          // For a large enough input, shuffle into buckets.
          ShuffleInput(input_ptr, params.input_depth, params.input_width,
                       params.input_height, 64, shuffle_params.input_width,
                       shuffle_params.input_height, shuffle_workspace);
          // The shuffled slice is dense: depth 64 and row size
          // |shuffle_row_size| replace the original strides.
          ConvKernel::Run(shuffle_workspace, filter_ptr, bias_ptr, output_ptr,
                          0, 64, 64, shuffle_row_size,
                          shuffle_params.output_height,
                          shuffle_params.output_width, params);
          input_ptr += 64;
          output_ptr += 64;
          filter_ptr += 64;
          bias_ptr += 64;
        }
        // Preload.
        const uint8* h_ptr = input_ptr;
        for (int32 i = 0; i < shuffle_params.input_height; i++) {
          const uint8* ptr = h_ptr;
          for (int32 j = 0; j < shuffle_params.input_width; j++) {
            asm volatile("prfm pldl1keep, [%[ptr]]\n" ::[ptr] "r"(ptr) :);
            ptr += params.input_depth;
          }
          h_ptr += params.input_row_size;
        }
        // Handle leftover depth.
        ConvKernel::Run(input_ptr, filter_ptr, bias_ptr, output_ptr, depth,
                        params.output_depth, params.input_depth,
                        params.input_row_size, shuffle_params.output_height,
                        shuffle_params.output_width, params)
        input_data +=
            shuffle_params.output_width * kStrideWidth * params.input_depth;
        output_data += shuffle_params.output_width * params.output_depth;
      }
    }
    // Convolve any remaining output columns (narrower than a full shuffle
    // span, or the whole span when shuffling was skipped) directly from the
    // unshuffled input.
    const int32 output_leftover_width = end_x - out_x;
    if (output_leftover_width > 0) {
      ConvKernel::Run(input_data, filter_data, bias_data, output_data, 0,
                      params.output_depth, params.input_depth,
                      params.input_row_size, shuffle_params.output_height,
                      output_leftover_width, params);
    }
  }
};
// Processes the borders of the input for pad_width and pad_height = 1.
// Calls 4 asm kernels:
// * 1x1 input shape.
// * Corner edges.
// * Horizontal edges.
// * Vertical edges.
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConvHandlePadding(const uint8* input_data,
                                       const uint8* filter_data,
                                       const int32* bias_data,
                                       uint8* output_data,
                                       const DepthwiseConvParams& params) {
  // Walks the outer ring of the output (top row, left/right columns, bottom
  // row) and dispatches each segment to the matching DepthwiseConvPartial
  // edge kernel. The interior outputs are computed elsewhere. Filter pointers
  // are advanced past the filter rows/columns that would overlap the padding,
  // since the edge kernels only read the in-bounds filter taps.
  if (params.input_width == 1 && params.input_height == 1) {
    // Degenerate 1x1 input: only the center filter tap overlaps real input.
    // Skip one filter row (filter_row_size) and one depth column
    // (output_depth) to point at that tap.
    const uint8* filter_ptr =
        filter_data + params.filter_row_size + params.output_depth;
    DepthwiseConvPartial<output_rounding, EdgeType::kCenter, 1, 1>::Run(
        input_data, filter_ptr, bias_data, output_data, &params);
    return;
  }
  const int32 out_x_start_corner = 0;
  const int32 out_x_end_corner = params.output_width - 1;
  const int32 out_y_start_corner = 0;
  const int32 out_y_end_corner = params.output_height - 1;
  // Handle top row.
  const uint8* input_ptr = input_data;
  // Top-left corner: skip filter row 0 and the first depth column, both of
  // which fall on padding.
  const uint8* filter_ptr =
      filter_data + params.filter_row_size + params.output_depth;
  uint8* output_ptr = output_data;
  DepthwiseConvPartial<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      input_ptr, filter_ptr, bias_data, output_ptr, &params);
  // For stride 1 the next output reads input starting at x = 0; for stride 2
  // it starts one depth-column further in.
  input_ptr += (params.stride_width - 1) * params.input_depth;
  // Along the top edge only filter row 0 is padded out.
  filter_ptr = filter_data + params.filter_row_size;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvPartial<output_rounding, EdgeType::kHorizontal, 1, 1>::Run(
        input_ptr, filter_ptr, bias_data, output_ptr, &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Top-right corner reuses the same filter pointer as the horizontal run.
  DepthwiseConvPartial<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      input_ptr, filter_ptr, bias_data, output_ptr, &params);
  // Handle left side.
  input_ptr = input_data + (params.stride_width - 1) * params.input_row_size;
  // Along the left edge only the first filter depth column is padded out.
  filter_ptr = filter_data + params.input_depth;
  output_ptr = output_data + params.output_row_size;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvPartial<output_rounding, EdgeType::kVertical, 1, 1>::Run(
        input_ptr, filter_ptr, bias_data, output_ptr, &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle right side.
  // Start two depth-columns from the right of the input so the 3-wide window
  // stays in bounds; the filter is used from its start (no column skipped on
  // this edge in this layout).
  input_ptr = input_data + (params.input_width - 2) * params.input_depth +
              (params.stride_width - 1) * params.input_row_size;
  filter_ptr = filter_data;
  output_ptr = output_data + params.output_row_size +
               (params.output_width - 1) * params.output_depth;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvPartial<output_rounding, EdgeType::kVertical, 1, 1>::Run(
        input_ptr, filter_ptr, bias_data, output_ptr, &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle bottom row.
  // Start two rows up from the bottom so the 3-tall window stays in bounds.
  input_ptr = input_data + (params.input_height - 2) * params.input_row_size;
  // Bottom-left corner: only the first depth column is padded out.
  filter_ptr = filter_data + params.output_depth;
  output_ptr =
      output_data + (params.output_height - 1) * params.output_row_size;
  DepthwiseConvPartial<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      input_ptr, filter_ptr, bias_data, output_ptr, &params);
  // Stride 1 keeps the same input x; stride 2 advances one depth column.
  input_ptr += (params.stride_width == 1) ? 0 : params.input_depth;
  filter_ptr = filter_data;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvPartial<output_rounding, EdgeType::kHorizontal, 1, 1>::Run(
        input_ptr, filter_ptr, bias_data, output_ptr, &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Bottom-right corner.
  DepthwiseConvPartial<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      input_ptr, filter_ptr, bias_data, output_ptr, &params);
}
// Top-level driver for the optimized 3x3 depthwise convolution on uint8 data.
// Validates the supported configuration (depth multiplier 1, 3x3 filter,
// equal strides of 1 or 2, padding 0 or 1), handles the padded border via
// DepthwiseConvHandlePadding, then processes the interior in multi-row
// batches (8/4/2/1 rows) chosen to maximize reuse of the shuffle workspace.
// thread_start / thread_end / thread_dim slice the work for multithreading:
// thread_dim == 0 slices over batches, thread_dim == 1 over output rows.
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConv3x3Filter(
    const DepthwiseParams& rt_params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, int thread_start, int thread_end, int thread_dim) {
  DepthwiseConvParams params;
  const int32 stride_width = rt_params.stride_width;
  const int32 stride_height = rt_params.stride_height;
  const int32 pad_width = rt_params.padding_values.width;
  const int32 pad_height = rt_params.padding_values.height;
  const int32 depth_multiplier = rt_params.depth_multiplier;
  const int32 output_activation_min = rt_params.quantized_activation_min;
  const int32 output_activation_max = rt_params.quantized_activation_max;
  const int32 input_offset = rt_params.input_offset;
  const int32 filter_offset = rt_params.weights_offset;
  const int32 output_offset = rt_params.output_offset;
  const int32 output_multiplier = rt_params.output_multiplier;
  const int32 output_shift = rt_params.output_shift;
  // Copy runtime parameters into the POD struct consumed by the asm kernels.
  // Shapes are NHWC: Dims(1)=height, Dims(2)=width, Dims(3)=depth.
  params.input_depth = input_shape.Dims(3);
  params.input_width = input_shape.Dims(2);
  params.input_height = input_shape.Dims(1);
  params.input_row_size = params.input_depth * params.input_width;
  params.input_offset = input_offset;
  params.stride_width = stride_width;
  params.stride_height = stride_height;
  params.output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  params.output_width = output_shape.Dims(2);
  params.output_height = output_shape.Dims(1);
  params.output_row_size = params.output_depth * params.output_width;
  params.output_offset = output_offset;
  params.filter_offset = filter_offset;
  params.output_multiplier = output_multiplier;
  params.output_right_shift = output_shift;
  params.output_activation_min = output_activation_min;
  params.output_activation_max = output_activation_max;
  const int32 filter_height = filter_shape.Dims(1);
  const int32 filter_width = filter_shape.Dims(2);
  params.filter_row_size = params.output_depth * filter_width;
  // Algorithm assumes below constraints. It is optimized for depth
  // multiplier of 1, 3x3 filter, no padding and strides 1 and 2.
  TFLITE_DCHECK(params.output_depth == params.input_depth * depth_multiplier);
  TFLITE_DCHECK(depth_multiplier == 1);
  TFLITE_DCHECK(filter_height == 3);
  TFLITE_DCHECK(filter_width == 3);
  TFLITE_DCHECK(stride_height == 1 || stride_height == 2);
  TFLITE_DCHECK(stride_width == 1 || stride_width == 2);
  TFLITE_DCHECK(stride_width == stride_height);
  TFLITE_DCHECK(pad_height == 0 || pad_height == 1);
  TFLITE_DCHECK(pad_width == 0 || pad_width == 1);
  TFLITE_DCHECK(pad_width == pad_height);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  const int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int64_t input_batch_size = params.input_row_size * params.input_height;
  const int64_t output_batch_size =
      params.output_row_size * params.output_height;
  // Shuffle shapes (output_width, output_height, stride_w, stride_h) tuned so
  // each variant's shuffled input fits the fixed scratch workspace.
  ShuffleParams one_row_shuffle_params, two_row_shuffle_params,
      four_row_shuffle_params, eight_row_shuffle_params;
  if (stride_width == 1) {
    one_row_shuffle_params = ShuffleParams(30, 1, 1, 1);
    two_row_shuffle_params = ShuffleParams(22, 2, 1, 1);
    four_row_shuffle_params = ShuffleParams(14, 4, 1, 1);
    eight_row_shuffle_params = ShuffleParams(8, 8, 1, 1);
  } else {
    one_row_shuffle_params = ShuffleParams(14, 1, 2, 2);
    two_row_shuffle_params = ShuffleParams(8, 2, 2, 2);
    four_row_shuffle_params = ShuffleParams(4, 4, 2, 2);
    eight_row_shuffle_params = ShuffleParams(2, 8, 2, 2);
  }
  // Select the stride-specialized multi-row kernel once, outside the loops.
  using conv_multirow_func_t =
      decltype(&DepthwiseConvMultiRow<output_rounding, 1, 1>::Run);
  conv_multirow_func_t conv_multirow_func =
      DepthwiseConvMultiRow<output_rounding, 1, 1>::Run;
  if (stride_width == 2) {
    conv_multirow_func = DepthwiseConvMultiRow<output_rounding, 2, 2>::Run;
  }
  // Allocate maximum memory needed for shuffled input.
  // TODO(mariewhite): The size of this workspace is small enough to be
  // allocated on the stack. Eventually we will want to move it to the heap
  // and have it allocated outside of this function, like the im2col_array
  // used in gemmlowp.
  uint8 shuffle_workspace[kDepthwiseConvScratchWorkspaceSize];
  // Determine this thread's slice of the work.
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = params.output_height;
  switch (thread_dim) {
    case 0:
      // Multithreaded over the batch dimension.
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      break;
    case 1:
      // Multithreaded over output rows.
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, params.output_height);
      row_start = thread_start;
      row_end = thread_end;
      break;
  }
  for (int32 b = batch_start; b < batch_end; ++b) {
    // input_ptr and output_ptr point to the start of each batch
    const uint8* input_ptr = input_data + b * input_batch_size;
    uint8* output_ptr = output_data + b * output_batch_size;
    int32 out_x = 0;
    int32 out_y = row_start;
    int32 end_x = params.output_width;
    int32 end_y = row_end;
    if (pad_width == 1 && pad_height == 1) {
      DepthwiseConvHandlePadding<output_rounding>(
          input_ptr, filter_data, bias_data, output_ptr, params);
      // Update extents now that the edges have been handled.
      out_x = 1;
      end_x = params.output_width - 1;
      out_y = std::max(1, out_y);
      end_y = std::min(params.output_height - 1, end_y);
    }
    // pad_width and pad_height can both be 0 or 1, depending on padding option,
    // such as Padding_VALID / Padding_SAME.
    const int in_x = (out_x * stride_width) - pad_width;
    const int in_y = (out_y * stride_height) - pad_height;
    // input_ptr and output_ptr point to (in_y, in_x) and (out_y, out_x),
    // respectively. (in_y, in_x) and (out_y, out_x) change along with
    // row_start.
    input_ptr += in_y * params.input_row_size + in_x * params.input_depth;
    output_ptr += out_y * params.output_row_size + out_x * params.output_depth;
    // Shuffling shapes that maximize width over the shuffle workspace size
    // perform better since the inputs are closer together, minimizing
    // shuffling time.
    //
    // If the input shape has width large enough for the 2 row kernels,
    // we prefer to use this. The innermost loop of the kernels handle
    // 2 height x 2 width so this is the fastest path.
    //
    // If the input shape has smaller width but larger height, shuffling is
    // still useful and can benefit from kernels 4 row and 8 row kernels.
    // Handle 8 rows at a time.
    if (params.input_width < four_row_shuffle_params.input_width) {
      for (; out_y <= end_y - 8; out_y += 8) {
        conv_multirow_func(input_ptr, out_x, end_x, filter_data, bias_data,
                           output_ptr, params, eight_row_shuffle_params,
                           shuffle_workspace);
        input_ptr += 8 * stride_height * params.input_row_size;
        output_ptr += 8 * params.output_row_size;
      }
    }
    // Handle 4 rows at a time.
    if (params.input_width < two_row_shuffle_params.input_width) {
      for (; out_y <= end_y - 4; out_y += 4) {
        conv_multirow_func(input_ptr, out_x, end_x, filter_data, bias_data,
                           output_ptr, params, four_row_shuffle_params,
                           shuffle_workspace);
        input_ptr += 4 * stride_height * params.input_row_size;
        output_ptr += 4 * params.output_row_size;
      }
    }
    // Handle 2 rows at a time.
    for (; out_y <= end_y - 2; out_y += 2) {
      conv_multirow_func(input_ptr, out_x, end_x, filter_data, bias_data,
                         output_ptr, params, two_row_shuffle_params,
                         shuffle_workspace);
      input_ptr += 2 * stride_height * params.input_row_size;
      output_ptr += 2 * params.output_row_size;
    }
    // Handle one row at a time.
    for (; out_y < end_y; out_y++) {
      conv_multirow_func(input_ptr, out_x, end_x, filter_data, bias_data,
                         output_ptr, params, one_row_shuffle_params,
                         shuffle_workspace);
      input_ptr += stride_height * params.input_row_size;
      output_ptr += params.output_row_size;
    }
  }
}
#endif // __aarch64__
// Perform any necessary cache hinting and pre-writing.
// Generic (default) case: no cache hinting or pre-writing is performed.
// The AArch64 dot-product specialization below overrides this.
template <DepthwiseConvImplementation implementation>
struct WorkspacePrefetchWrite {
  // No-op. Parameters mirror the specialized version: fill_data is the byte
  // to pre-write, size the workspace size in bytes, workspace the buffer.
  static inline void Run(int8 fill_data, int size, int8* workspace) {}
};
#if defined(__aarch64__)
// Encourage the processor to keep the workspace in cache. Both the cache hint
// and some memory writes are required.
//
// This code is extremely fragile.
// Do not edit without extensive comparative performance testing.
// Do not inline without great care.
// Do not rely on results before and after getting coffee: non-thermal changes
// of more than 10% can occur with hidden underlying processor state changes.
template <>
struct WorkspacePrefetchWrite<
    DepthwiseConvImplementation::kUseNeon3x3DotProduct> {
  // Touches the workspace once per 64-byte stride: issues a PSTL1KEEP
  // prefetch-for-store and writes 4 bytes of fill_data, then writes the last
  // 4 bytes of the buffer. Deliberately noinline (see fragility note above).
  static void __attribute__((noinline))
  Run(int8 fill_data, int size, int8* workspace) {
    const int8x8_t fill_data_vec_int8 = vdup_n_s8(fill_data);
    const uint32x2_t fill_data_vec = vreinterpret_u32_s8(fill_data_vec_int8);
    // Step by 64 (a typical cache-line size); the (size - 15) bound keeps the
    // 4-byte lane store within the buffer.
    for (int i = 0; i < (size - 15); i += 64) {
      int8* ptr = workspace + i;
      asm volatile("prfm pstl1keep, [%[ptr]]\n" ::[ptr] "r"(ptr) :);
      vst1_lane_u32(reinterpret_cast<uint32_t*>(ptr), fill_data_vec, 0);
    }
    // Ensure the tail of the buffer is also written.
    vst1_lane_u32(reinterpret_cast<uint32_t*>(workspace + size - 4),
                  fill_data_vec, 0);
  }
};
#endif // __aarch64__
#if defined(__aarch64__) && !defined(GOOGLE_L4T) && defined(__ANDROID__) && \
defined(__clang__)
// Dot product ops hard-coded
// Filter/bias pre-processing for the dot-product path, uint8 (non-per-channel)
// quantization. Per micro-block of 8 depth channels, the 3x3 uint8 filter is
// re-interleaved (zip) into the layout the SDOT conv kernels expect, converted
// to signed by XOR with 0x80 (v0 = 128), and the bias is adjusted by the
// per-block filter sums scaled by the input-offset term (v1). The asm body is
// a hand-scheduled A64 translation; do not modify the asm strings.
template <>
struct ProcessPerDepth<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                       QuantizationType::kNonPerChannelUint8> {
  static inline void ProcessPerDepthNeon(
      const uint8* filter_data, const int32* bias_data,
      int8* shuffled_filter_data, int32* adjusted_bias_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Note that argument registers may be reused after parameter loading.
    // x0 %[filter_data]
    // x1 %[bias_data]
    // x2 %[shuffled_filter_data]
    // x3 %[adjusted_bias_data]
    // x4 %[function_params]
    // Local asm label numbers (loop body / loop condition).
#define DC_PER_DEPTH_1 "1"
#define DC_PER_DEPTH_2 "2"
    asm volatile(
        "ldp w12, w11, [%[function_params], #" STR(DP_OFFSET_BIAS_INCREMENT) "]\n"
        "ldrsw x9, [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr w10, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
        "mov x8, xzr\n"
        "add w11, w11, #128\n"  // =128
        "sxtw x12, w12\n"
        "movi v0.16b, #128\n"
        "dup v1.4s, w11\n"
        "lsl x11, x12, #3\n"
        "lsl x12, x12, #2\n"
        "movi v2.16b, #1\n"
        // implicit-def: $q3
        // implicit-def: $q4
        // implicit-def: $q5
        // implicit-def: $q6
        // implicit-def: $q7
        // implicit-def: $q16
        // implicit-def: $q17
        // implicit-def: $q18
        // implicit-def: $q19
        "b " DC_PER_DEPTH_2 "f\n"
        DC_PER_DEPTH_1 ":\n"  // in Loop: Header=BB177_2 Depth=1
        "add x13, %[filter_data], x8, lsl #3\n"
        "ld1 { v19.d }[0], [x13], x9\n"
        "movi v21.16b, #0\n"
        "movi v20.16b, #0\n"
        "add x8, x8, #1\n"  // =1
        "ld1 { v18.d }[0], [x13], x9\n"
        "ld1 { v17.d }[0], [x13], x9\n"
        "zip1 v22.16b, v19.16b, v18.16b\n"
        "eor v22.16b, v22.16b, v0.16b\n"
        "ld1 { v16.d }[0], [x13], x9\n"
        "zip1 v23.16b, v17.16b, v0.16b\n"
        "eor v23.16b, v23.16b, v0.16b\n"
        "zip1 v24.8h, v22.8h, v23.8h\n"
        "ld1 { v7.d }[0], [x13], x9\n"
        "zip2 v22.8h, v22.8h, v23.8h\n"
        ".word 0x4e8296d5  // sdot v21.4s, v22.16b, v2.16b\n"
        ".word 0x4e829714  // sdot v20.4s, v24.16b, v2.16b\n"
        "ld1 { v6.d }[0], [x13], x9\n"
        "zip1 v23.16b, v16.16b, v7.16b\n"
        "eor v23.16b, v23.16b, v0.16b\n"
        "ld1 { v5.d }[0], [x13], x9\n"
        "zip1 v25.16b, v6.16b, v0.16b\n"
        "eor v25.16b, v25.16b, v0.16b\n"
        "zip1 v26.8h, v23.8h, v25.8h\n"
        "ld1 { v4.d }[0], [x13], x9\n"
        "zip2 v23.8h, v23.8h, v25.8h\n"
        ".word 0x4e8296f5  // sdot v21.4s, v23.16b, v2.16b\n"
        ".word 0x4e829754  // sdot v20.4s, v26.16b, v2.16b\n"
        "ld1 { v3.d }[0], [x13]\n"
        "zip1 v25.16b, v5.16b, v4.16b\n"
        "stp q26, q23, [%[shuffled_filter_data], #32]\n"
        "stp q24, q22, [%[shuffled_filter_data]]\n"
        "zip1 v23.16b, v3.16b, v0.16b\n"
        "eor v22.16b, v25.16b, v0.16b\n"
        "eor v23.16b, v23.16b, v0.16b\n"
        "zip1 v24.8h, v22.8h, v23.8h\n"
        "zip2 v22.8h, v22.8h, v23.8h\n"
        "stp q24, q22, [%[shuffled_filter_data], #64]\n"
        ".word 0x4e8296d5  // sdot v21.4s, v22.16b, v2.16b\n"
        "ldr q22, [%[bias_data]]\n"
        "ldr q23, [%[bias_data], x12]\n"
        ".word 0x4e829714  // sdot v20.4s, v24.16b, v2.16b\n"
        "add %[shuffled_filter_data], x2, #96\n"  // =96
        "mla v22.4s, v20.4s, v1.4s\n"
        "mla v23.4s, v21.4s, v1.4s\n"
        "add %[bias_data], x1, x11\n"
        "stp q22, q23, [%[adjusted_bias_data]], #32\n"
        DC_PER_DEPTH_2 ":\n"  // =>This Inner Loop Header: Depth=1
        "cmp w8, w10\n"
        "b.lt " DC_PER_DEPTH_1 "b\n"
        :
        // Outputs.
        [ filter_data ] "+r"(filter_data),
        [ bias_data ] "+r"(bias_data),
        [ shuffled_filter_data ] "+r"(shuffled_filter_data),
        [ adjusted_bias_data ] "+r"(adjusted_bias_data)
        :
        // Inputs.
        [ function_params ] "r"(function_params)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18",
        "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
        // We use these general-purpose registers.
        "x8", "x9", "x10", "x11", "x12", "x13");
#undef DC_PER_DEPTH_1
#undef DC_PER_DEPTH_2
  }
  // Entry point used by the dispatcher; noinline to keep the asm body out of
  // callers' instruction streams (see fragility notes earlier in this file).
  static void __attribute__((noinline))
  Run(const uint8* filter_data, const int32* bias_data,
      int8* shuffled_filter_data, int32* adjusted_bias_data,
      const DepthwiseConvDotProdParams* function_params) {
    ProcessPerDepthNeon(filter_data, bias_data, shuffled_filter_data,
                        adjusted_bias_data, function_params);
  }
};
// Filter/bias pre-processing for the dot-product path, per-channel int8
// quantization. Same interleaved (zip) shuffling of the 3x3 filter as the
// uint8 variant above, but the data is already signed, so zero-padding (v0)
// is used instead of the sign-bit XOR, and the bias adjustment multiplies the
// per-block filter sums by the input offset loaded from function_params (v1).
// The asm body is a hand-scheduled A64 translation; do not modify the asm
// strings.
template <>
struct ProcessPerDepth<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                       QuantizationType::kPerChannelInt8> {
  static inline void ProcessPerDepthNeon(
      const int8* filter_data, const int32* bias_data,
      int8* shuffled_filter_data, int32* adjusted_bias_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Note that argument registers may be reused after parameter loading.
    // x0 %[filter_data]
    // x1 %[bias_data]
    // x2 %[shuffled_filter_data]
    // x3 %[adjusted_bias_data]
    // x4 %[function_params]
    // Local asm label numbers (loop body / loop header / exit).
#define DC_PER_DEPTH_1 "1"
#define DC_PER_DEPTH_2 "2"
#define DC_PER_DEPTH_3 "3"
    asm volatile(  // %bb.0:
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
        "cmp w8, #1\n"  // =1
        "b.lt " DC_PER_DEPTH_3 "f\n"
        // %bb.1:
        "add x10, %[function_params], #" STR(DP_OFFSET_INPUT_OFFSET) "\n"  // =24
        "ldrsw x11, [%[function_params], #" STR(DP_OFFSET_BIAS_INCREMENT) "]\n"
        "ldrsw x9, [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
        "ld1r { v1.4s }, [x10]\n"
        "movi v0.16b, #0\n"
        "lsl x10, x11, #2\n"
        "lsl x11, x11, #3\n"
        "movi v2.16b, #1\n"
        "mov x12, %[filter_data]\n"
        // implicit-def: $q3
        // implicit-def: $q4
        // implicit-def: $q5
        // implicit-def: $q6
        // implicit-def: $q7
        // implicit-def: $q16
        // implicit-def: $q17
        // implicit-def: $q18
        // implicit-def: $q19
        DC_PER_DEPTH_2 ":\n"  // =>This Inner Loop Header: Depth=1
        "add x13, %[filter_data], x9\n"
        "ld1 { v3.d }[0], [x12], #8\n"
        "ld1 { v4.d }[0], [x13], x9\n"
        "movi v21.16b, #0\n"
        "movi v20.16b, #0\n"
        "subs w8, w8, #1\n"  // =1
        "ld1 { v5.d }[0], [x13], x9\n"
        "zip1 v22.16b, v3.16b, v4.16b\n"
        "mov %[filter_data], x12\n"
        "ld1 { v6.d }[0], [x13], x9\n"
        "zip1 v23.16b, v5.16b, v0.16b\n"
        "zip1 v24.8h, v22.8h, v23.8h\n"
        "zip2 v22.8h, v22.8h, v23.8h\n"
        "ld1 { v7.d }[0], [x13], x9\n"
        ".word 0x4e8296d5  // sdot v21.4s, v22.16b, v2.16b\n"
        ".word 0x4e829714  // sdot v20.4s, v24.16b, v2.16b\n"
        "ld1 { v16.d }[0], [x13], x9\n"
        "zip1 v23.16b, v6.16b, v7.16b\n"
        "ld1 { v17.d }[0], [x13], x9\n"
        "zip1 v25.16b, v16.16b, v0.16b\n"
        "zip1 v26.8h, v23.8h, v25.8h\n"
        "zip2 v23.8h, v23.8h, v25.8h\n"
        "ld1 { v18.d }[0], [x13], x9\n"
        ".word 0x4e8296f5  // sdot v21.4s, v23.16b, v2.16b\n"
        ".word 0x4e829754  // sdot v20.4s, v26.16b, v2.16b\n"
        "ld1 { v19.d }[0], [x13]\n"
        "zip1 v25.16b, v17.16b, v18.16b\n"
        "stp q24, q22, [%[shuffled_filter_data]]\n"
        "stp q26, q23, [%[shuffled_filter_data], #32]\n"
        "zip1 v22.16b, v19.16b, v0.16b\n"
        "zip1 v23.8h, v25.8h, v22.8h\n"
        "zip2 v22.8h, v25.8h, v22.8h\n"
        "stp q23, q22, [%[shuffled_filter_data], #64]\n"
        ".word 0x4e8296f4  // sdot v20.4s, v23.16b, v2.16b\n"
        ".word 0x4e8296d5  // sdot v21.4s, v22.16b, v2.16b\n"
        "ldr q22, [%[bias_data]]\n"
        "ldr q23, [%[bias_data], x10]\n"
        "add %[shuffled_filter_data], x2, #96\n"  // =96
        "add %[bias_data], x1, x11\n"
        "mla v22.4s, v20.4s, v1.4s\n"
        "mla v23.4s, v21.4s, v1.4s\n"
        "stp q22, q23, [%[adjusted_bias_data]], #32\n"
        "b.ne " DC_PER_DEPTH_2 "b\n"
        DC_PER_DEPTH_3 ":\n"
        :
        // Outputs.
        [ filter_data ] "+r"(filter_data),
        [ bias_data ] "+r"(bias_data),
        [ shuffled_filter_data ] "+r"(shuffled_filter_data),
        [ adjusted_bias_data ] "+r"(adjusted_bias_data)
        :
        // Inputs.
        [ function_params ] "r"(function_params)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18",
        "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26",
        // We use these general-purpose registers.
        "x8", "x9", "x10", "x11", "x12", "x13");
#undef DC_PER_DEPTH_1
#undef DC_PER_DEPTH_2
#undef DC_PER_DEPTH_3
  }
  // Entry point used by the dispatcher; noinline to keep the asm body out of
  // callers' instruction streams.
  static void __attribute__((noinline))
  Run(const int8* filter_data, const int32* bias_data,
      int8* shuffled_filter_data, int32* adjusted_bias_data,
      const DepthwiseConvDotProdParams* function_params) {
    ProcessPerDepthNeon(filter_data, bias_data, shuffled_filter_data,
                        adjusted_bias_data, function_params);
  }
};
// Packs one macro block of input into the scratch workspace for the
// dot-product conv kernels: no depth multiplication, no padding
// (max_padding = 0). Input is transposed into interleaved 4x4 micro blocks
// (width x depth) via NEON zips; for uint8 quantization the values are also
// converted to signed by XOR with the sign bit (zero-point 128).
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kNoMultiplication,
                      /*max_padding=*/0> {
  static inline void PackMacroBlockNeon(
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // This specialization is padding-free by contract.
    TFLITE_DCHECK_EQ(function_params->padding_bottom, 0);
    TFLITE_DCHECK_EQ(function_params->padding_top, 0);
    TFLITE_DCHECK_EQ(function_params->padding_left, 0);
    TFLITE_DCHECK_EQ(function_params->padding_right, 0);
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int input_depth = function_params->input_depth;
    TFLITE_DCHECK_GE(depth_micro_repeats, 0);
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    // A micro block is 4 width positions x 8 depth channels = 32 bytes.
    const int micro_block_size = 4 * 8;
    // Pointer advances through the scratch layout: depth_advance moves one
    // micro-depth down, width_advance rewinds depth and moves one micro-width
    // over, height_advance moves to the next workspace row.
    const int depth_advance = width_overall_micro_repeats * micro_block_size;
    const int width_advance =
        micro_block_size *
        (1 - depth_micro_repeats * width_overall_micro_repeats);
    const int height_advance = workspace_height_stride -
                               width_overall_micro_repeats * micro_block_size;
    const int input_depth_skip = 4 * input_depth - 8 * depth_micro_repeats;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
    // code. Note the blocks of 4x4 are still interleaved down the depth.
    int8x16_t work_reg_a;
    int8x16_t work_reg_b;
    // Effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    // Work through one slice, by row, at a time.
    int8* scratch_data_0 = scratch_block_data;
    for (int k_height = 0; k_height < block_height; ++k_height) {
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_data_0 = input_block_data;
      int8x16_t input_data_a;
      int8x16_t input_data_b;
      int8x16_t input_data_c;
      int8x16_t input_data_d;
      // Traverse the width one point at a time, but the depth in (micro) blocks
      // of size 8.
      //
      // The depth and width margins, which are filled with "zeros", may be
      // larger than is strictly needed to calculate output. This is because the
      // conv calculation is performed across complete micro blocks.
      for (int j_width = 0; j_width < input_width_micro_repeats; ++j_width) {
        int8x16_t work_reg_a_sp;
        int8x16_t work_reg_b_sp;
        int i_depth = 0;
        if (depth_micro_repeats >= 2) {
          // Software-pipelined main loop: loads for the next iteration are
          // interleaved with zips/stores of the current one.
          i_depth += 2;
          input_data_a = util_vld1q_x8(input_data_0);
          input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
          input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
          input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
          input_data_0 += 16;
          for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
            work_reg_a = vzip1q_s8(input_data_a, input_data_b);
            work_reg_b = vzip1q_s8(input_data_c, input_data_d);
            vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
            if (quantization_type == QuantizationType::kNonPerChannelUint8) {
              // uint8 path only: fold in the zero-point of 128 via sign-bit
              // XOR; int8 data is stored as-is.
              work_reg_a = veorq_s8(work_reg_a, sign_bit);
              work_reg_b = veorq_s8(work_reg_b, sign_bit);
            }
            work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
            work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
            vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
            input_data_a = util_vld1q_x8(input_data_0);
            input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
            optimized_ops_prefetch_write_l1_keep(scratch_data_0);
            optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
            vst1q_s8(scratch_data_0, work_reg_a);
            vst1q_s8(scratch_data_0 + 16, work_reg_b);
            scratch_data_0 += depth_advance;
            if (quantization_type == QuantizationType::kNonPerChannelUint8) {
              work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
              work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
            }
            input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
            input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
            optimized_ops_prefetch_write_l1_keep(scratch_data_0);
            optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
            vst1q_s8(scratch_data_0, work_reg_a_sp);
            vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
            scratch_data_0 += depth_advance;
            input_data_0 += 16;
          }
          // Epilogue for the pipelined loop: flush the final pre-loaded pair.
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          optimized_ops_prefetch_write_l1_keep(scratch_data_0);
          optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
          work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
          work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
          vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
            work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
          }
          optimized_ops_prefetch_write_l1_keep(scratch_data_0);
          optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
          vst1q_s8(scratch_data_0, work_reg_a_sp);
          vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
          scratch_data_0 += depth_advance;
        }
        // Remaining single micro-depth (odd depth_micro_repeats): 8-byte
        // lane loads instead of full 16-byte loads.
        for (; i_depth < depth_micro_repeats; ++i_depth) {
          input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
          input_data_b =
              vld1q_lane_s8x8(input_data_0 + 1 * input_depth, input_data_b, 0);
          input_data_c =
              vld1q_lane_s8x8(input_data_0 + 2 * input_depth, input_data_c, 0);
          input_data_d =
              vld1q_lane_s8x8(input_data_0 + 3 * input_depth, input_data_d, 0);
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          input_data_0 += 8;
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          optimized_ops_prefetch_write_l1_keep(scratch_data_0);
          optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
        }
        scratch_data_0 += width_advance;
        input_data_0 += input_depth_skip;
      }
      // Partial final width micro block (residual_width in [1, 3]): unread
      // width positions are filled with the sign-bit value so they decode as
      // zero after the XOR conversion.
      if (width_overall_micro_repeats > input_width_micro_repeats) {
        TFLITE_DCHECK_EQ(width_overall_micro_repeats,
                         input_width_micro_repeats + 1);
        TFLITE_DCHECK_GT(residual_width, 0);
        TFLITE_DCHECK_LT(residual_width, 4);
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          input_data_c = vdupq_n_u8(kSignBit);
          input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
          input_data_d = vdupq_n_u8(kSignBit);
          if (residual_width > 1) {
            input_data_b =
                vld1q_lane_s8x8(input_data_0 + input_depth, input_data_b, 0);
            if (residual_width == 3) {
              input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
                                             input_data_c, 0);
            }
          }
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          optimized_ops_prefetch_write_l1_keep(scratch_data_0);
          optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
          input_data_0 += 8;
        }
        scratch_data_0 += width_advance;
        input_data_0 += input_depth_skip;
      }
      scratch_data_0 += height_advance;
      input_block_data += input_height_stride;
    }
    // Sanity check: we must have written exactly block_height workspace rows.
    TFLITE_DCHECK_EQ(
        scratch_data_0,
        scratch_block_data + block_height * workspace_height_stride);
  }
  // Dispatcher entry point: preloads the input block into cache, then packs.
  // height_block_number / width_block_number are unused here because this
  // no-padding specialization treats every macro block identically.
  static void __attribute__((noinline))
  Run(int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    PreloadInputBlock(input_block_data, function_params);
    PackMacroBlockNeon(input_block_data, scratch_block_data, function_params);
  }
};
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*max_padding=*/1> {
static inline void PackMacroBlockNeon(
int32 height_block_number, int32 width_block_number,
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_block_data,
int8* scratch_block_data,
const DepthwiseConvDotProdParams* function_params) {
constexpr uint8 kSignBit =
QuantizationTypeImpl<quantization_type>::kUint8SignBit;
const int workspace_height_stride =
function_params->workspace_height_stride;
const int width_overall_micro_repeats =
function_params->input_width_overall_micro_repeats;
const int input_width_micro_repeats =
function_params->input_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int block_height = function_params->inbound_block_height;
const int residual_width = function_params->residual_width;
const int input_height_stride = function_params->input_height_stride;
const int input_depth = function_params->input_depth;
const int padding_left = function_params->padding_left;
const int padding_right = function_params->padding_right;
const int padding_top = function_params->padding_top;
const int padding_bottom = function_params->padding_bottom;
TFLITE_DCHECK_GT(depth_micro_repeats, 0);
constexpr int kSymmetricZeroPoint =
QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
const int micro_block_size = 4 * 8;
const int depth_advance = width_overall_micro_repeats * micro_block_size;
const int width_advance =
micro_block_size *
(1 - depth_micro_repeats * width_overall_micro_repeats);
const int height_advance = workspace_height_stride -
width_overall_micro_repeats * micro_block_size;
const int input_depth_skip = 4 * input_depth - 8 * depth_micro_repeats;
const bool leading_width_padding =
padding_left > 0 && width_block_number == 0;
const bool trailing_width_padding =
padding_right > 0 &&
width_block_number == (function_params->width_macro_count - 1);
const bool leading_height_padding =
padding_top > 0 && height_block_number < 0;
const bool trailing_height_padding =
padding_bottom > 0 &&
height_block_number == (function_params->height_macro_count - 1);
const int32 input_offset = function_params->input_offset;
const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
// Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
// code. Note the blocks of 4x4 are still interleaved down the depth.
int8x16_t work_reg_a;
int8x16_t work_reg_b;
// Effect subtraction of zero-point = 128 by XOR of sign bit.
const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
// Work through one slice, by row, at a time.
int8* scratch_data_0 = scratch_block_data;
int copy_block_height = block_height;
if (leading_height_padding) {
copy_block_height -= 1;
memset(scratch_data_0, -input_offset_difference, workspace_height_stride);
scratch_data_0 += workspace_height_stride;
input_block_data += input_height_stride;
}
if (trailing_height_padding) {
copy_block_height -= 1;
}
for (int k_height = 0; k_height < copy_block_height; ++k_height) {
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_data_0 = input_block_data;
int8x16_t input_data_a;
int8x16_t input_data_b;
int8x16_t input_data_c;
int8x16_t input_data_d;
// Traverse the width one point at a time, but the depth in (micro) blocks
// of size 8.
//
// The depth and width margins, which are filled with "zeros", may be
// larger than is strictly needed to calculate output. This is because the
// conv calculation is performed across complete micro blocks.
for (int j_width = 0; j_width < width_overall_micro_repeats; ++j_width) {
// Figure out division of work (available input vs zero-ed).
int adjusted_residual_width =
j_width == (input_width_micro_repeats) ? residual_width : 4;
if (trailing_width_padding &&
j_width == (width_overall_micro_repeats - 1)) {
adjusted_residual_width -= 1;
}
int start_width = 0;
if (leading_width_padding && j_width == 0) {
start_width = 1;
}
if (start_width == 0) {
if (adjusted_residual_width == 4) {
int8x16_t work_reg_a_sp;
int8x16_t work_reg_b_sp;
int i_depth = 0;
if (depth_micro_repeats >= 2) {
i_depth += 2;
input_data_a = util_vld1q_x8(input_data_0);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
input_data_0 += 16;
for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
input_data_a = util_vld1q_x8(input_data_0);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
input_data_0 += 16;
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
}
for (; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
input_data_b = vld1q_lane_s8x8(input_data_0 + 1 * input_depth,
input_data_b, 0);
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
input_data_d = vld1q_lane_s8x8(input_data_0 + 3 * input_depth,
input_data_d, 0);
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
input_data_0 += 8;
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
} else {
TFLITE_DCHECK_LT(adjusted_residual_width, 4);
for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vdupq_n_u8(-input_offset);
input_data_c = vdupq_n_u8(-input_offset);
input_data_d = vdupq_n_u8(-input_offset);
if (adjusted_residual_width > 0) {
input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
if (adjusted_residual_width > 1) {
input_data_b = vld1q_lane_s8x8(input_data_0 + input_depth,
input_data_b, 0);
if (adjusted_residual_width == 3) {
input_data_c = vld1q_lane_s8x8(
input_data_0 + 2 * input_depth, input_data_c, 0);
}
}
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
input_data_0 += 8;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
}
} else {
if (adjusted_residual_width == 4) {
int8x16_t work_reg_a_sp;
int8x16_t work_reg_b_sp;
int i_depth = 0;
if (depth_micro_repeats >= 2) {
i_depth += 2;
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
input_data_0 += 16;
for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
input_data_0 += 16;
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
}
for (; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vld1q_lane_s8x8(input_data_0 + 1 * input_depth,
input_data_b, 0);
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
input_data_d = vld1q_lane_s8x8(input_data_0 + 3 * input_depth,
input_data_d, 0);
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
input_data_0 += 8;
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
} else {
TFLITE_DCHECK_LT(adjusted_residual_width, 4);
for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vdupq_n_u8(-input_offset);
input_data_c = vdupq_n_u8(-input_offset);
input_data_d = vdupq_n_u8(-input_offset);
// Skip loading first column.
if (adjusted_residual_width > 1) {
input_data_b = vld1q_lane_s8x8(input_data_0 + input_depth,
input_data_b, 0);
if (adjusted_residual_width == 3) {
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
}
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
optimized_ops_prefetch_write_l1_keep(scratch_data_0);
optimized_ops_prefetch_write_l1_keep(scratch_data_0 + 16);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
input_data_0 += 8;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
}
}
}
scratch_data_0 += height_advance;
input_block_data += input_height_stride;
}
if (trailing_height_padding) {
memset(scratch_data_0, -input_offset_difference, workspace_height_stride);
scratch_data_0 += workspace_height_stride;
}
TFLITE_DCHECK_EQ(
scratch_data_0,
scratch_block_data + block_height * workspace_height_stride);
}
  // Entry point for this packing specialization: prefetches the input macro
  // block, then repacks it into the scratch workspace via PackMacroBlockNeon.
  // The preload must happen before the pack so that reads in the packing loop
  // hit warm cache lines.
  // NOTE(review): marked noinline, presumably to limit code-size expansion of
  // this large packing routine into callers -- confirm.
  static void __attribute__((noinline))
  Run(int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    PreloadInputBlock(input_block_data, function_params);
    PackMacroBlockNeon(height_block_number, width_block_number,
                       input_block_data, scratch_block_data, function_params);
  }
};
// Specialization of PackMacroBlock for depth multiplication with unit input
// depth, where up to one element of padding may be required on each edge
// (/*max_padding=*/1). Copies one macro block of input into the scratch
// workspace, materializing zero-point padding rows (memset of the negated
// offset difference) and padded lanes within rows (via a bit-select mask),
// and -- for kNonPerChannelUint8 data -- flipping the sign bit (XOR with
// kUint8SignBit) so that the workspace always holds int8 values.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kUnitInputDepth,
                      /*max_padding=*/1> {
  // Copies a macro block of unit-depth input into the scratch workspace.
  // height_block_number < 0 together with padding_top > 0 signals a leading
  // padding row; the last height/width macro block may carry trailing
  // padding. Rows are processed one at a time with a strategy chosen by
  // copy_size (bytes of real input per row).
  static inline void PackMacroBlockNeon(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int padding_left = function_params->padding_left;
    const int padding_right = function_params->padding_right;
    const int padding_top = function_params->padding_top;
    const int padding_bottom = function_params->padding_bottom;
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
    // Padding applies only on the outer edges of the overall input: the first
    // width/height macro block (leading) and the last one (trailing).
    const bool leading_width_padding =
        padding_left > 0 && width_block_number == 0;
    const bool trailing_width_padding =
        padding_right > 0 &&
        width_block_number == (function_params->width_macro_count - 1);
    const bool leading_height_padding =
        padding_top > 0 && height_block_number < 0;
    const bool trailing_height_padding =
        padding_bottom > 0 &&
        height_block_number == (function_params->height_macro_count - 1);
    const int32 input_offset = function_params->input_offset;
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    // Work through one slice, by row, at a time.
    int8* scratch_data_base = scratch_block_data;
    int copy_block_height = block_height;
    if (leading_height_padding) {
      // Emit a full padding row up front, then advance past it.
      copy_block_height -= 1;
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
      input_block_data += input_height_stride;
    }
    if (trailing_height_padding) {
      copy_block_height -= 1;
    }
    int adjusted_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    if (trailing_width_padding) {
      adjusted_residual_width -= 1;
    }
    int start_width = 0;
    if (leading_width_padding) {
      // Column 0 of the workspace row is padding; real data starts at 1.
      start_width = 1;
      input_block_data += 1;
    }
    const int copy_size = (width_overall_micro_repeats - 1) * 4 +
                          adjusted_residual_width - start_width;
    // Adjusted so that later conditionals are simplified.
    const int copy_size_adjusted =
        trailing_width_padding ? copy_size + 1 : copy_size;
    TFLITE_DCHECK_LE(
        copy_size,
        input_height_stride - width_block_number * input_width_micro_repeats);
    // We may drop up to stride-1 of trailing input.
    TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
    int scratch_data_offset = 0;
    int input_block_offset = 0;
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
    // code. Note the blocks of 4x4 are still interleaved down the depth.
    int8x16_t work_reg;
    int8x8_t half_work_reg;
    int8x8_t padding_mask;
    // Effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    const uint8x16_t padding_reg = vdupq_n_u8(-input_offset);
    // padding_mask starts all-ones; the per-branch left shift below clears the
    // low (valid) lanes so that vbsl substitutes the padding value only into
    // lanes beyond the valid remainder.
    padding_mask = vdup_n_s8(-1);
    half_work_reg = vdup_n_s8(0);
    if (copy_size >= 16) {
      // Wide rows: bulk-copy in 16-byte chunks, then one optional 8-byte
      // chunk, with any remainder handled by an overlapping 8-byte load.
      const int copy_remaining = (copy_size + start_width) & 0x7;
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          // Shift the loaded 16 bytes right by one lane, inserting one
          // padding byte at the front, then advance only 15 input bytes.
          work_reg = util_vld1q_x8(input_block_data + input_block_offset);
          work_reg = vextq_s8(padding_reg, work_reg, 15);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg = veorq_s8(work_reg, sign_bit);
          }
          optimized_ops_prefetch_write_l1_keep(scratch_data);
          vst1q_s8(scratch_data, work_reg);
          copy_done += 15;
        }
        // Main copy loop.
        for (; (copy_done + 16) <= copy_size; copy_done += 16) {
          work_reg =
              util_vld1q_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg = veorq_s8(work_reg, sign_bit);
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 16, 0);
          optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                               copy_done);
          vst1q_s8(scratch_data + start_width + copy_done, work_reg);
        }
        if (copy_done + 8 <= copy_size) {
          half_work_reg =
              util_vld1_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 8, 0);
          optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                               copy_done);
          vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
          copy_done += 8;
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = util_vld1_x8(input_block_data + input_block_offset +
                                       copy_size - 8);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (8 - copy_remaining))));
          // Replace lanes past the valid remainder with the padding value.
          half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                  vget_low_s8(padding_reg), half_work_reg);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 8, 0);
          optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                               copy_done);
          vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
        }
        // Trailing guard.
        optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                             copy_done);
        optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                             copy_done + 8);
        vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
        vst1_s8(scratch_data + start_width + copy_done + 8, half_work_reg);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (copy_size >= 4) {
      // Medium rows: same strategy as above but in 4-byte chunks, using
      // 32-bit lane loads/stores.
      const int copy_remaining = (copy_size + start_width) & 0x3;
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          half_work_reg = vld1_lane_8x4(input_block_data + input_block_offset,
                                        half_work_reg, 0);
          half_work_reg = vext_s8(vget_low_s8(padding_reg), half_work_reg, 7);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          optimized_ops_prefetch_write_l1_keep(scratch_data);
          vst1_lane_8x4(scratch_data, half_work_reg, 0);
          copy_done += 3;
        }
        // Main copy loop.
        for (; (copy_done + 4) <= copy_size; copy_done += 4) {
          half_work_reg =
              vld1_lane_8x4(input_block_data + input_block_offset + copy_done,
                            half_work_reg, 0);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 4, 0);
          optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                               copy_done);
          vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg,
                        0);
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          TFLITE_DCHECK_LT(copy_remaining, 4);
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = vld1_lane_8x4(
              input_block_data + input_block_offset + copy_size - 4,
              half_work_reg, 0);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (4 - copy_remaining))));
          // Replace lanes past the valid remainder with the padding value.
          half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                  vget_low_s8(padding_reg), half_work_reg);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 4, 0);
          optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                               copy_done);
          vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg,
                        0);
          copy_done += 4;
        }
        // Trailing guard.
        optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                             copy_done);
        optimized_ops_prefetch_write_l1_keep(scratch_data + start_width +
                                             copy_done + 12);
        vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg, 0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 4, half_work_reg,
                      0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 8, half_work_reg,
                      0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 12,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (width_overall_micro_repeats == 2) {
      // Special case of 1 + 3 + 1, padding + copy + padding.
      // This is rarely executed in practice.
      TFLITE_DCHECK_EQ(copy_size, 3);
      TFLITE_DCHECK_EQ(start_width, 1);
      TFLITE_DCHECK(leading_width_padding);
      TFLITE_DCHECK(trailing_width_padding);
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Fill the register with padding, then overwrite lanes 1..3 with the
        // three real input bytes (lanes 0 and 4 stay as padding).
        half_work_reg = vdup_n_u8(-input_offset);
        half_work_reg = vld1_lane_s8(reinterpret_cast<const int8*>(
                                         input_block_data + input_block_offset),
                                     half_work_reg, 1);
        half_work_reg =
            vld1_lane_s8(reinterpret_cast<const int8*>(input_block_data +
                                                       input_block_offset + 1),
                         half_work_reg, 2);
        half_work_reg =
            vld1_lane_s8(reinterpret_cast<const int8*>(input_block_data +
                                                       input_block_offset + 2),
                         half_work_reg, 3);
        if (quantization_type == QuantizationType::kNonPerChannelUint8) {
          half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
        }
        TFLITE_DCHECK_EQ(scratch_data_offset % 8, 0);
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset);
        vst1_s8(scratch_data_base + scratch_data_offset, half_work_reg);
        // Trailing guard.
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset + 4);
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset + 16);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else {
      TFLITE_DCHECK_EQ(width_overall_micro_repeats, 1);
      // Narrow rows (copy_size < 4): assemble the bytes one at a time into
      // half_work_reg, loading the last byte first so that each left shift
      // makes room for the next-earlier byte in lane 0.
      const int copy_remaining = (copy_size + start_width) & 0x3;
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      if (leading_width_padding) {
        padding_mask = vset_lane_u8(255, padding_mask, 0);
      }
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        for (int i = 0; i < copy_size; ++i) {
          half_work_reg = vreinterpret_s8_s64(
              vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
          half_work_reg = vld1_lane_s8(
              reinterpret_cast<const int8*>(
                  input_block_data + input_block_offset + copy_size - 1 - i),
              half_work_reg, 0);
        }
        if (leading_width_padding) {
          // Shift the assembled bytes up one lane; lane 0 becomes padding
          // (selected through padding_mask below).
          half_work_reg = vreinterpret_s8_s64(
              vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
        }
        half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                vget_low_s8(padding_reg), half_work_reg);
        if (quantization_type == QuantizationType::kNonPerChannelUint8) {
          half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
        }
        TFLITE_DCHECK_EQ(scratch_data_offset % 4, 0);
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset, half_work_reg,
                      0);
        // Trailing guard.
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset + 4);
        optimized_ops_prefetch_write_l1_keep(scratch_data_base +
                                             scratch_data_offset + 16);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    }
    scratch_data_base += copy_block_height * workspace_height_stride;
    if (trailing_height_padding) {
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
    }
    TFLITE_DCHECK_EQ(
        scratch_data_base,
        scratch_block_data + block_height * workspace_height_stride);
  }
  // Entry point: prefetches the input macro block into L1, then packs it into
  // the scratch workspace via PackMacroBlockNeon.
  static void __attribute__((noinline))
  Run(int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    PreloadInputBlock(input_block_data, function_params);
    PackMacroBlockNeon(height_block_number, width_block_number,
                       input_block_data, scratch_block_data, function_params);
  }
};
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*max_padding=*/0> {
static inline void PackMacroBlockNeon(
int32 height_block_number, int32 width_block_number,
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_block_data,
int8* scratch_block_data,
const DepthwiseConvDotProdParams* function_params) {
const int workspace_height_stride =
function_params->workspace_height_stride;
const int width_overall_micro_repeats =
function_params->input_width_overall_micro_repeats;
const int input_width_micro_repeats =
function_params->input_width_micro_repeats;
const int block_height = function_params->inbound_block_height;
const int residual_width = function_params->residual_width;
const int input_height_stride = function_params->input_height_stride;
TFLITE_DCHECK_EQ(function_params->padding_left, 0);
TFLITE_DCHECK_EQ(function_params->padding_right, 0);
TFLITE_DCHECK_EQ(function_params->padding_top, 0);
TFLITE_DCHECK_EQ(function_params->padding_bottom, 0);
TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
// Work through one slice, by row, at a time.
int8* scratch_data_base = scratch_block_data;
const int copy_block_height = block_height;
int adjusted_residual_width =
input_width_micro_repeats < width_overall_micro_repeats ? residual_width
: 4;
const int copy_size =
(width_overall_micro_repeats - 1) * 4 + adjusted_residual_width;
TFLITE_DCHECK_LE(
copy_size,
input_height_stride - width_block_number * input_width_micro_repeats);
// We may drop up to stride-1 of trailing input.
TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
int scratch_data_offset = 0;
int input_block_offset = 0;
constexpr uint8 kSignBit =
QuantizationTypeImpl<quantization_type>::kUint8SignBit;
// Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
// code. Note the blocks of 4x4 are still interleaved down the depth.
int8x16_t work_reg;
int8x8_t half_work_reg;
// Effect subtraction of zero-point = 128 by XOR of sign bit.
const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
half_work_reg = vdup_n_s8(0);
if (copy_size >= 16) {
const int copy_remaining = copy_size & 0x7;
for (int k_height = 0; k_height < copy_block_height; ++k_height) {
// Work through one slice, by row, at a time.
int8* scratch_data = scratch_data_base + scratch_data_offset;
int copy_done = 0;
// Main copy loop.
for (; (copy_done + 16) <= copy_size; copy_done += 16) {
work_reg =
util_vld1q_x8(input_block_data + input_block_offset + copy_done);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg = veorq_s8(work_reg, sign_bit);
}
TFLITE_DCHECK_EQ(copy_done % 16, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
vst1q_s8(scratch_data + copy_done, work_reg);
}
if (copy_done + 8 <= copy_size) {
half_work_reg =
util_vld1_x8(input_block_data + input_block_offset + copy_done);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
}
TFLITE_DCHECK_EQ(copy_done % 8, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
vst1_s8(scratch_data + copy_done, half_work_reg);
copy_done += 8;
}
TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
// Total amount
// = copy_size - copy_done + 4 - adjusted_residual_width
// = width_overall_micro_repeats * 4 - start_width - copy_done.
// Undone micro blocks
// = width_overall_micro_repeats - (start_width + copy_done) / 4.
// Conditional is (copy_remaining > 0 || trailing_width_padding).
if (copy_done < copy_size) {
// Employ overlapping-load strategy in order to load full register,
// but use only part.
// This has the advantage of resulting in zeros after shifting.
half_work_reg = util_vld1_x8(input_block_data + input_block_offset +
copy_size - 8);
half_work_reg = vreinterpret_s8_s64(
vshl_s64(vreinterpret_s64_s8(half_work_reg),
vdup_n_s64(-8 * (8 - copy_remaining))));
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
}
TFLITE_DCHECK_EQ(copy_done % 8, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
vst1_s8(scratch_data + copy_done, half_work_reg);
copy_done += 8;
}
// Trailing guard.
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done + 8);
vst1_s8(scratch_data + copy_done, half_work_reg);
vst1_s8(scratch_data + copy_done + 8, half_work_reg);
scratch_data_offset += workspace_height_stride;
input_block_offset += input_height_stride;
}
} else if (copy_size >= 4) {
const int copy_remaining = copy_size & 0x3;
for (int k_height = 0; k_height < copy_block_height; ++k_height) {
// Work through one slice, by row, at a time.
int8* scratch_data = scratch_data_base + scratch_data_offset;
int copy_done = 0;
// Main copy loop.
for (; (copy_done + 4) <= copy_size; copy_done += 4) {
half_work_reg =
vld1_lane_8x4(input_block_data + input_block_offset + copy_done,
half_work_reg, 0);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
}
TFLITE_DCHECK_EQ(copy_done % 4, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
}
TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
// Total amount
// = copy_size - copy_done + 4 - adjusted_residual_width
// = width_overall_micro_repeats * 4 - start_width - copy_done.
// Undone micro blocks
// = width_overall_micro_repeats - (start_width + copy_done) / 4.
// Conditional is (copy_remaining > 0 || trailing_width_padding).
if (copy_done < copy_size) {
TFLITE_DCHECK_LT(copy_remaining, 4);
// Employ overlapping-load strategy in order to load full register,
// but use only part.
// This has the advantage of resulting in zeros after shifting.
half_work_reg = vld1_lane_8x4(
input_block_data + input_block_offset + copy_size - 4,
half_work_reg, 0);
half_work_reg = vreinterpret_s8_s64(
vshl_s64(vreinterpret_s64_s8(half_work_reg),
vdup_n_s64(-8 * (4 - copy_remaining))));
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
}
TFLITE_DCHECK_EQ(copy_done % 4, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
copy_done += 4;
}
// Trailing guard.
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done);
optimized_ops_prefetch_write_l1_keep(scratch_data + copy_done + 12);
vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
vst1_lane_8x4(scratch_data + copy_done + 4, half_work_reg, 0);
vst1_lane_8x4(scratch_data + copy_done + 8, half_work_reg, 0);
vst1_lane_8x4(scratch_data + copy_done + 12, half_work_reg, 0);
scratch_data_offset += workspace_height_stride;
input_block_offset += input_height_stride;
}
} else {
TFLITE_DCHECK_EQ(width_overall_micro_repeats, 1);
for (int k_height = 0; k_height < copy_block_height; ++k_height) {
for (int i = 0; i < copy_size; ++i) {
half_work_reg = vreinterpret_s8_s64(
vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
half_work_reg = vld1_lane_s8(
reinterpret_cast<const int8*>(
input_block_data + input_block_offset + copy_size - 1 - i),
half_work_reg, 0);
}
half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
TFLITE_DCHECK_EQ(scratch_data_offset % 4, 0);
optimized_ops_prefetch_write_l1_keep(scratch_data_base +
scratch_data_offset);
vst1_lane_8x4(scratch_data_base + scratch_data_offset, half_work_reg,
0);
// Trailing guard.
optimized_ops_prefetch_write_l1_keep(scratch_data_base +
scratch_data_offset + 8);
vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
half_work_reg, 0);
vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
half_work_reg, 0);
vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
half_work_reg, 0);
vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
half_work_reg, 0);
scratch_data_offset += workspace_height_stride;
input_block_offset += input_height_stride;
}
}
scratch_data_base += copy_block_height * workspace_height_stride;
TFLITE_DCHECK_EQ(
scratch_data_base,
scratch_block_data + block_height * workspace_height_stride);
}
  // Entry point for packing one input macro block.
  //
  // First issues cache prefetches over the input region (PreloadInputBlock),
  // then runs the NEON packing routine (PackMacroBlockNeon) that rearranges
  // the raw input block into the layout expected by the dot-product kernels.
  //
  // height_block_number / width_block_number: coordinates of the macro block
  //     being processed (consumed by PackMacroBlockNeon).
  // input_block_data:    source data in the external quantized representation.
  // scratch_block_data:  destination workspace, written as int8.
  // function_params:     geometry/stride parameters shared by all stages.
  // Marked noinline to keep this cold dispatch shim out of callers' code.
  static void __attribute__((noinline))
  Run(int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    PreloadInputBlock(input_block_data, function_params);
    PackMacroBlockNeon(height_block_number, width_block_number,
                       input_block_data, scratch_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*stride=*/1> {
static inline void KernelMacroBlockNeon(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
// Note that argument registers may be reused after parameter loading.
// x0 %[scratch_block_data]
// x1 %[filter_workspace]
// x2 %[bias_data]
// x3 %[output_block_data]
// x4 %[function_params]
#define DC_KERNEL_NO_MULT_1 "1"
#define DC_KERNEL_NO_MULT_2 "2"
#define DC_KERNEL_NO_MULT_3 "3"
#define DC_KERNEL_NO_MULT_4 "4"
#define DC_KERNEL_NO_MULT_5 "5"
#define DC_KERNEL_NO_MULT_6 "6"
#define DC_KERNEL_NO_MULT_7 "7"
#define DC_KERNEL_NO_MULT_8 "8"
#define DC_KERNEL_NO_MULT_9 "9"
#define DC_KERNEL_NO_MULT_10 "10"
#define DC_KERNEL_NO_MULT_11 "11"
#define DC_KERNEL_NO_MULT_12 "12"
#define DC_KERNEL_NO_MULT_13 "13"
#define DC_KERNEL_NO_MULT_14 "14"
#define DC_KERNEL_NO_MULT_15 "15"
#define DC_KERNEL_NO_MULT_16 "16"
#define DC_KERNEL_NO_MULT_17 "17"
#define DC_KERNEL_NO_MULT_18 "18"
#define DC_KERNEL_NO_MULT_19 "19"
#define DC_KERNEL_NO_MULT_20 "20"
#define DC_KERNEL_NO_MULT_21 "21"
#define DC_KERNEL_NO_MULT_22 "22"
#define DC_KERNEL_NO_MULT_23 "23"
#define DC_KERNEL_NO_MULT_24 "24"
#define DC_KERNEL_NO_MULT_25 "25"
#define DC_KERNEL_NO_MULT_26 "26"
#define DC_KERNEL_NO_MULT_27 "27"
#define DC_KERNEL_NO_MULT_28 "28"
#define DC_KERNEL_NO_MULT_29 "29"
#define DC_KERNEL_NO_MULT_30 "30"
#define DC_KERNEL_NO_MULT_31 "31"
#define DC_KERNEL_NO_MULT_32 "32"
#define DC_KERNEL_NO_MULT_33 "33"
#define DC_KERNEL_NO_MULT_34 "34"
#define DC_KERNEL_NO_MULT_35 "35"
asm volatile(
// Compiled code used block of 320 for spill out of total stack of 464.
"sub sp, sp, #320\n" // =464
"ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
"cmp w8, #1\n" // =1
"str w8, [sp, #36]\n" // 4-byte Folded Spill
"b.lt " DC_KERNEL_NO_MULT_35 "f\n"
// %bb.1:
"ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"str xzr, [sp, #64]\n" // 8-byte Folded Spill
"str wzr, [sp, #60]\n" // 4-byte Folded Spill
"ldpsw x21, x14, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
"str w8, [sp, #276]\n" // 4-byte Folded Spill
"ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS) "]\n"
"ldrsw x13, [%[function_params], #" STR(DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"ldrb w9, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "]\n"
"ldrsw x5, [%[function_params]]\n"
"str w8, [sp, #280]\n" // 4-byte Folded Spill
"ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
"add x11, %[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT) "\n" // =36
"add x12, %[function_params], #" STR(DP_OFFSET_OUTPUT_MULTIPLIER) "\n" // =32
"add x10, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n" // =28
"str w8, [sp, #284]\n" // 4-byte Folded Spill
"ldrb w8, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "]\n"
"ld1r { v1.4s }, [x12]\n"
"ld1r { v2.4s }, [x11]\n"
"lsl x12, x14, #2\n"
"dup v7.16b, w8\n"
"fmov s5, w8\n"
"lsl x8, x13, #5\n"
"add x13, x14, x14, lsl #1\n"
"add x11, x14, x14, lsl #2\n"
"mov x26, %[output_block_data]\n"
"mov %[output_block_data], %[filter_workspace]\n"
"ldr w7, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
"ld1r { v0.8h }, [x10]\n"
"dup v16.16b, w9\n"
"fmov s6, w9\n"
"lsl x15, x14, #1\n"
"lsl %[filter_workspace], x21, #1\n"
"add x27, x21, x21, lsl #1\n"
"lsl x9, x5, #1\n"
"add x10, x21, x5\n"
"stp x11, x12, [sp, #208]\n" // 16-byte Folded Spill
"add x11, x11, %[scratch_block_data]\n"
"add x12, x12, %[scratch_block_data]\n"
"str x13, [sp, #224]\n" // 8-byte Folded Spill
"add x13, x13, %[scratch_block_data]\n"
"str x8, [sp, #24]\n" // 8-byte Folded Spill
"stp x15, x14, [sp, #256]\n" // 16-byte Folded Spill
"add x8, x14, %[scratch_block_data]\n"
"add x14, x15, %[scratch_block_data]\n"
"add x15, x9, x5\n"
"add x16, x9, x27\n"
"add x17, x9, %[filter_workspace]\n"
"add x6, x9, x21\n"
"add %[function_params], x26, x9\n"
"add x9, x26, x10\n"
"add x10, x11, #32\n" // =32
"add x11, x12, #32\n" // =32
"add x12, x13, #32\n" // =32
"str x12, [sp, #312]\n" // 8-byte Folded Spill
"add x12, x14, #32\n" // =32
"str x12, [sp, #304]\n" // 8-byte Folded Spill
"add x12, x15, x27\n"
"add x13, x15, %[filter_workspace]\n"
"add x23, x15, x21\n"
"add x14, x26, x15\n"
"add x15, x27, x5\n"
"add x20, x26, x17\n"
"mov w17, w7\n"
"add x19, x26, x15\n"
"add x15, %[filter_workspace], x5\n"
"mov x22, xzr\n"
"str x14, [sp, #296]\n" // 8-byte Folded Spill
"add x14, x26, x16\n"
"add x7, x26, x6\n"
"add x16, x26, x15\n"
"add x15, x26, x13\n"
"add x6, x26, x23\n"
"and w13, w17, #0xfffffffe\n"
"lsl x23, x5, #2\n"
"dup v17.8b, v5.b[0]\n"
"dup v14.8b, v6.b[0]\n"
"add x8, x8, #32\n" // =32
"str x14, [sp, #288]\n" // 8-byte Folded Spill
"add x14, x26, x12\n"
"mov x12, xzr\n"
"str w13, [sp, #12]\n" // 4-byte Folded Spill
"mov x13, x16\n"
"stp x26, x23, [sp, #80]\n" // 16-byte Folded Spill
"add x23, x26, x21\n"
"add x22, x26, x5\n"
"mov x28, %[filter_workspace]\n"
"add %[filter_workspace], x26, x1\n"
"add x25, x26, x27\n"
"str %[scratch_block_data], [sp, #184]\n" // 8-byte Folded Spill
"str x21, [sp, #136]\n" // 8-byte Folded Spill
"str w17, [sp, #76]\n" // 4-byte Folded Spill
"str x26, [sp, #16]\n" // 8-byte Folded Spill
"stp d14, d17, [sp, #96]\n" // 16-byte Folded Spill
"stp x6, x23, [sp, #240]\n" // 16-byte Folded Spill
"b " DC_KERNEL_NO_MULT_4 "f\n"
DC_KERNEL_NO_MULT_2 ":\n" // in Loop: Header=BB225_4 Depth=1
"mov %[bias_data], x16\n"
DC_KERNEL_NO_MULT_3 ":\n" // in Loop: Header=BB225_4 Depth=1
"ldr %[output_block_data], [sp, #24]\n" // 8-byte Folded Reload
"ldr x12, [sp, #184]\n" // 8-byte Folded Reload
"ldr w17, [sp, #60]\n" // 4-byte Folded Reload
"add x12, x12, %[output_block_data]\n"
"str x12, [sp, #184]\n" // 8-byte Folded Spill
"ldr x12, [sp, #80]\n" // 8-byte Folded Reload
"add w17, w17, #1\n" // =1
"str w17, [sp, #60]\n" // 4-byte Folded Spill
"add x12, x12, #8\n" // =8
"str x12, [sp, #80]\n" // 8-byte Folded Spill
"ldr x12, [sp, #64]\n" // 8-byte Folded Reload
"add x12, x12, %[output_block_data]\n"
"str x12, [sp, #64]\n" // 8-byte Folded Spill
"ldr w12, [sp, #36]\n" // 4-byte Folded Reload
"cmp w17, w12\n"
"ldp x12, %[output_block_data], [sp, #40]\n" // 16-byte Folded Reload
"ldr w17, [sp, #76]\n" // 4-byte Folded Reload
"add x12, x12, #8\n" // =8
"b.eq " DC_KERNEL_NO_MULT_35 "f\n"
DC_KERNEL_NO_MULT_4 ":\n" // =>This Loop Header: Depth=1
// Child Loop BB225_31 Depth 2
// Child Loop BB225_34 Depth 2
// Child Loop BB225_20 Depth 2
// Child Loop BB225_23 Depth 3
// Child Loop BB225_27 Depth 4
// Child Loop BB225_7 Depth 2
// Child Loop BB225_9 Depth 3
// Child Loop BB225_15 Depth 3
"ldp q18, q15, [%[output_block_data]]\n"
"ldp q19, q5, [%[output_block_data], #32]\n"
"ldp q20, q6, [%[output_block_data], #64]\n"
"cmp w17, #4\n" // =4
"add %[output_block_data], x3, #96\n" // =96
"stp x12, %[output_block_data], [sp, #40]\n" // 16-byte Folded Spill
"b.ne " DC_KERNEL_NO_MULT_16 "f\n"
// %bb.5: // in Loop: Header=BB225_4 Depth=1
"mov x24, x12\n"
"ldr x12, [sp, #64]\n" // 8-byte Folded Reload
"mov x16, xzr\n"
"stp q6, q5, [sp, #144]\n" // 32-byte Folded Spill
"str q15, [sp, #112]\n" // 16-byte Folded Spill
"str x12, [sp, #232]\n" // 8-byte Folded Spill
"b " DC_KERNEL_NO_MULT_7 "f\n"
DC_KERNEL_NO_MULT_6 ":\n" // in Loop: Header=BB225_7 Depth=2
"ldr x12, [sp, #232]\n" // 8-byte Folded Reload
"ldp q20, q19, [sp, #144]\n" // 32-byte Folded Reload
"add x16, x16, #1\n" // =1
"cmp x16, #2\n" // =2
"add x12, x12, #16\n" // =16
"add x24, x24, #4\n" // =4
"mov v18.16b, v15.16b\n"
"str x12, [sp, #232]\n" // 8-byte Folded Spill
"b.eq " DC_KERNEL_NO_MULT_3 "b\n"
DC_KERNEL_NO_MULT_7 ":\n" // Parent Loop BB225_4 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB225_9 Depth 3
// Child Loop BB225_15 Depth 3
"ldr x12, [sp, #184]\n" // 8-byte Folded Reload
"ldr q21, [%[bias_data]], #16\n"
"add %[output_block_data], x12, x16, lsl #4\n"
"ldr w12, [sp, #280]\n" // 4-byte Folded Reload
"ldr q22, [%[output_block_data]]\n"
"mov v31.16b, v21.16b\n"
"mov v8.16b, v21.16b\n"
"cmp w12, #1\n" // =1
"ldr x12, [sp, #264]\n" // 8-byte Folded Reload
"mov v9.16b, v21.16b\n"
"mov v10.16b, v21.16b\n"
"ldr q27, [%[output_block_data], x12]\n"
"ldr x12, [sp, #256]\n" // 8-byte Folded Reload
"ldr q26, [%[output_block_data], x12]\n"
"ldr x12, [sp, #224]\n" // 8-byte Folded Reload
".word 0x4e9a969f // sdot v31.4s, v20.16b, v26.16b\n"
"ldr q25, [%[output_block_data], x12]\n"
"ldr x12, [sp, #216]\n" // 8-byte Folded Reload
".word 0x4e9a9668 // sdot v8.4s, v19.16b, v26.16b\n"
".word 0x4e9a9649 // sdot v9.4s, v18.16b, v26.16b\n"
".word 0x4e99964a // sdot v10.4s, v18.16b, v25.16b\n"
"ldr q24, [%[output_block_data], x12]\n"
"ldr x12, [sp, #208]\n" // 8-byte Folded Reload
"ldr q23, [%[output_block_data], x12]\n"
"b.lt " DC_KERNEL_NO_MULT_11 "f\n"
// %bb.8: // in Loop: Header=BB225_7 Depth=2
"stp x24, x16, [sp, #192]\n" // 16-byte Folded Spill
"ldr w12, [sp, #280]\n" // 4-byte Folded Reload
"mov x17, x24\n"
"ldr x21, [sp, #232]\n" // 8-byte Folded Reload
"mov x24, x25\n"
"mov x25, %[filter_workspace]\n"
"mov %[filter_workspace], x22\n"
"mov x22, x23\n"
"ldr x23, [sp, #88]\n" // 8-byte Folded Reload
"shl v28.4s, v18.4s, #8\n"
"shl v29.4s, v19.4s, #8\n"
"shl v30.4s, v20.4s, #8\n"
"mov v11.16b, v23.16b\n"
"mov v12.16b, v24.16b\n"
"mov v13.16b, v27.16b\n"
"mov v14.16b, v22.16b\n"
DC_KERNEL_NO_MULT_9 ":\n" // Parent Loop BB225_4 Depth=1
// Parent Loop BB225_7 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4e8e965f // sdot v31.4s, v18.16b, v14.16b\n"
".word 0x4e8d9648 // sdot v8.4s, v18.16b, v13.16b\n"
".word 0x4e999669 // sdot v9.4s, v19.16b, v25.16b\n"
".word 0x4e8d967f // sdot v31.4s, v19.16b, v13.16b\n"
".word 0x4e8c966a // sdot v10.4s, v19.16b, v12.16b\n"
".word 0x4e999688 // sdot v8.4s, v20.16b, v25.16b\n"
".word 0x4e8c9689 // sdot v9.4s, v20.16b, v12.16b\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
".word 0x4e8b968a // sdot v10.4s, v20.16b, v11.16b\n"
"sqrdmulh v8.4s, v8.4s, v1.4s\n"
"sqrdmulh v9.4s, v9.4s, v1.4s\n"
"sqrshl v31.4s, v31.4s, v2.4s\n"
"sqrdmulh v10.4s, v10.4s, v1.4s\n"
"sqrshl v8.4s, v8.4s, v2.4s\n"
"sqrshl v9.4s, v9.4s, v2.4s\n"
"sqxtn v31.4h, v31.4s\n"
"sqrshl v10.4s, v10.4s, v2.4s\n"
"sqxtn v9.4h, v9.4s\n"
"sqxtn2 v31.8h, v8.4s\n"
"sqxtn2 v9.8h, v10.4s\n"
"sqadd v31.8h, v31.8h, v0.8h\n"
"sqadd v8.8h, v9.8h, v0.8h\n"
"sqxtun v31.8b, v31.8h\n"
"sqxtun2 v31.16b, v8.8h\n"
"umax v31.16b, v31.16b, v7.16b\n"
"add %[output_block_data], x22, x17\n"
"umin v31.16b, v31.16b, v16.16b\n"
"str s31, [x26, x17]\n"
"st1 { v31.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x25, x17\n"
"st1 { v31.s }[2], [%[output_block_data]]\n"
"add %[output_block_data], x24, x17\n"
"mov v10.16b, v21.16b\n"
"st1 { v31.s }[3], [%[output_block_data]]\n"
"mov v31.16b, v21.16b\n"
"mov v8.16b, v21.16b\n"
".word 0x4e99978a // sdot v10.4s, v28.16b, v25.16b\n"
"mov x16, x26\n"
"ldr x26, [sp, #304]\n" // 8-byte Folded Reload
".word 0x4e8e979f // sdot v31.4s, v28.16b, v14.16b\n"
".word 0x4e8d9788 // sdot v8.4s, v28.16b, v13.16b\n"
".word 0x4e8c97aa // sdot v10.4s, v29.16b, v12.16b\n"
"mov v9.16b, v21.16b\n"
".word 0x4e8d97bf // sdot v31.4s, v29.16b, v13.16b\n"
".word 0x4e9a97a8 // sdot v8.4s, v29.16b, v26.16b\n"
".word 0x4e8b97ca // sdot v10.4s, v30.16b, v11.16b\n"
"rev32 v4.8h, v26.8h\n"
".word 0x4e9a9789 // sdot v9.4s, v28.16b, v26.16b\n"
".word 0x4e9a97df // sdot v31.4s, v30.16b, v26.16b\n"
".word 0x4e9997c8 // sdot v8.4s, v30.16b, v25.16b\n"
"sqrdmulh v26.4s, v10.4s, v1.4s\n"
"rev32 v6.8h, v24.8h\n"
".word 0x4e9997a9 // sdot v9.4s, v29.16b, v25.16b\n"
"sqrdmulh v24.4s, v8.4s, v1.4s\n"
"sqrshl v8.4s, v26.4s, v2.4s\n"
"ldr q26, [x26, x21]\n"
"ldr x26, [sp, #312]\n" // 8-byte Folded Reload
"mov v17.16b, v16.16b\n"
"mov v16.16b, v7.16b\n"
"rev32 v7.8h, v23.8h\n"
".word 0x4e8c97c9 // sdot v9.4s, v30.16b, v12.16b\n"
"sqrdmulh v23.4s, v31.4s, v1.4s\n"
"rev32 v5.8h, v25.8h\n"
"sqrdmulh v25.4s, v9.4s, v1.4s\n"
"sqrshl v23.4s, v23.4s, v2.4s\n"
"add %[output_block_data], %[scratch_block_data], x21\n"
"sqrshl v31.4s, v24.4s, v2.4s\n"
"sqrshl v24.4s, v25.4s, v2.4s\n"
"sqxtn v9.4h, v23.4s\n"
"rev32 v15.8h, v22.8h\n"
"ldr q22, [%[output_block_data], #32]\n"
"rev32 v3.8h, v27.8h\n"
"sqxtn v10.4h, v24.4s\n"
"ldr q27, [x8, x21]\n"
"ldr q25, [x26, x21]\n"
"ldr q24, [x11, x21]\n"
"ldr q23, [x10, x21]\n"
"sqxtn2 v9.8h, v31.4s\n"
"sqxtn2 v10.8h, v8.4s\n"
"sqadd v31.8h, v9.8h, v0.8h\n"
"sqadd v8.8h, v10.8h, v0.8h\n"
"sqxtun v31.8b, v31.8h\n"
"sqxtun2 v31.16b, v8.8h\n"
"umax v31.16b, v31.16b, v16.16b\n"
"add %[output_block_data], x9, x17\n"
"umin v31.16b, v31.16b, v17.16b\n"
"str s31, [%[filter_workspace], x17]\n"
"st1 { v31.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x13, x17\n"
"st1 { v31.s }[2], [%[output_block_data]]\n"
"add %[output_block_data], x19, x17\n"
"mov v8.16b, v21.16b\n"
"st1 { v31.s }[3], [%[output_block_data]]\n"
"trn1 v31.8h, v15.8h, v22.8h\n"
"mov v9.16b, v21.16b\n"
"mov v10.16b, v21.16b\n"
"trn1 v3.8h, v3.8h, v27.8h\n"
"trn1 v4.8h, v4.8h, v26.8h\n"
".word 0x4e9f9648 // sdot v8.4s, v18.16b, v31.16b\n"
"mov v11.16b, v21.16b\n"
"trn1 v5.8h, v5.8h, v25.8h\n"
".word 0x4e839649 // sdot v9.4s, v18.16b, v3.16b\n"
".word 0x4e84964a // sdot v10.4s, v18.16b, v4.16b\n"
".word 0x4e839668 // sdot v8.4s, v19.16b, v3.16b\n"
"trn1 v6.8h, v6.8h, v24.8h\n"
".word 0x4e85964b // sdot v11.4s, v18.16b, v5.16b\n"
".word 0x4e849669 // sdot v9.4s, v19.16b, v4.16b\n"
".word 0x4e85966a // sdot v10.4s, v19.16b, v5.16b\n"
".word 0x4e849688 // sdot v8.4s, v20.16b, v4.16b\n"
"trn1 v7.8h, v7.8h, v23.8h\n"
".word 0x4e86966b // sdot v11.4s, v19.16b, v6.16b\n"
".word 0x4e859689 // sdot v9.4s, v20.16b, v5.16b\n"
".word 0x4e86968a // sdot v10.4s, v20.16b, v6.16b\n"
"sqrdmulh v8.4s, v8.4s, v1.4s\n"
".word 0x4e87968b // sdot v11.4s, v20.16b, v7.16b\n"
"sqrdmulh v9.4s, v9.4s, v1.4s\n"
"sqrdmulh v10.4s, v10.4s, v1.4s\n"
"sqrshl v8.4s, v8.4s, v2.4s\n"
"sqrdmulh v11.4s, v11.4s, v1.4s\n"
"sqrshl v9.4s, v9.4s, v2.4s\n"
"sqrshl v10.4s, v10.4s, v2.4s\n"
"sqxtn v8.4h, v8.4s\n"
"sqrshl v11.4s, v11.4s, v2.4s\n"
"sqxtn v10.4h, v10.4s\n"
"sqxtn2 v8.8h, v9.4s\n"
"sqxtn2 v10.8h, v11.4s\n"
"sqadd v8.8h, v8.8h, v0.8h\n"
"sqadd v9.8h, v10.8h, v0.8h\n"
"sqxtun v8.8b, v8.8h\n"
"sqxtun2 v8.16b, v9.8h\n"
"mov v9.16b, v21.16b\n"
"mov v10.16b, v21.16b\n"
"mov v11.16b, v21.16b\n"
".word 0x4e9f9789 // sdot v9.4s, v28.16b, v31.16b\n"
"mov x26, x16\n"
"ldr x16, [sp, #288]\n" // 8-byte Folded Reload
"mov v12.16b, v21.16b\n"
".word 0x4e83978a // sdot v10.4s, v28.16b, v3.16b\n"
".word 0x4e84978b // sdot v11.4s, v28.16b, v4.16b\n"
".word 0x4e8397a9 // sdot v9.4s, v29.16b, v3.16b\n"
"umax v8.16b, v8.16b, v16.16b\n"
".word 0x4e85978c // sdot v12.4s, v28.16b, v5.16b\n"
".word 0x4e8497aa // sdot v10.4s, v29.16b, v4.16b\n"
".word 0x4e8597ab // sdot v11.4s, v29.16b, v5.16b\n"
".word 0x4e8497c9 // sdot v9.4s, v30.16b, v4.16b\n"
"add %[output_block_data], x7, x17\n"
"umin v8.16b, v8.16b, v17.16b\n"
".word 0x4e8697ac // sdot v12.4s, v29.16b, v6.16b\n"
".word 0x4e8597ca // sdot v10.4s, v30.16b, v5.16b\n"
".word 0x4e8697cb // sdot v11.4s, v30.16b, v6.16b\n"
"sqrdmulh v3.4s, v9.4s, v1.4s\n"
"str s8, [%[function_params], x17]\n"
"st1 { v8.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x20, x17\n"
".word 0x4e8797cc // sdot v12.4s, v30.16b, v7.16b\n"
"sqrdmulh v4.4s, v10.4s, v1.4s\n"
"sqrdmulh v5.4s, v11.4s, v1.4s\n"
"sqrshl v3.4s, v3.4s, v2.4s\n"
"st1 { v8.s }[2], [%[output_block_data]]\n"
"add %[output_block_data], x16, x17\n"
"sqrdmulh v6.4s, v12.4s, v1.4s\n"
"sqrshl v4.4s, v4.4s, v2.4s\n"
"sqrshl v5.4s, v5.4s, v2.4s\n"
"sqxtn v3.4h, v3.4s\n"
"st1 { v8.s }[3], [%[output_block_data]]\n"
"sqrshl v6.4s, v6.4s, v2.4s\n"
"sqxtn v5.4h, v5.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqxtn2 v5.8h, v6.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqadd v4.8h, v5.8h, v0.8h\n"
"sqxtun v3.8b, v3.8h\n"
"sqxtun2 v3.16b, v4.8h\n"
"ldr x16, [sp, #296]\n" // 8-byte Folded Reload
"mov v7.16b, v16.16b\n"
"umax v3.16b, v3.16b, v7.16b\n"
"add %[output_block_data], x6, x17\n"
"umin v3.16b, v3.16b, v17.16b\n"
"str s3, [x16, x17]\n"
"st1 { v3.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x15, x17\n"
"mov v31.16b, v21.16b\n"
"mov v8.16b, v21.16b\n"
"mov v9.16b, v21.16b\n"
"mov v10.16b, v21.16b\n"
"mov v16.16b, v17.16b\n"
"st1 { v3.s }[2], [%[output_block_data]]\n"
"add %[output_block_data], x14, x17\n"
"subs w12, w12, #1\n" // =1
"add x21, x21, #32\n" // =32
".word 0x4e9a969f // sdot v31.4s, v20.16b, v26.16b\n"
".word 0x4e9a9668 // sdot v8.4s, v19.16b, v26.16b\n"
".word 0x4e9a9649 // sdot v9.4s, v18.16b, v26.16b\n"
".word 0x4e99964a // sdot v10.4s, v18.16b, v25.16b\n"
"add x17, x17, x23\n"
"mov v11.16b, v23.16b\n"
"mov v12.16b, v24.16b\n"
"mov v13.16b, v27.16b\n"
"mov v14.16b, v22.16b\n"
"st1 { v3.s }[3], [%[output_block_data]]\n"
"b.ne " DC_KERNEL_NO_MULT_9 "b\n"
// %bb.10: // in Loop: Header=BB225_7 Depth=2
"add %[output_block_data], %[scratch_block_data], x21\n"
"ldr x21, [sp, #136]\n" // 8-byte Folded Reload
"ldp d14, d17, [sp, #96]\n" // 16-byte Folded Reload
"mov x23, x22\n"
"mov x22, %[filter_workspace]\n"
"mov %[filter_workspace], x25\n"
"mov x25, x24\n"
"ldr q15, [sp, #112]\n" // 16-byte Folded Reload
"ldp x24, x16, [sp, #192]\n" // 16-byte Folded Reload
"add x12, x26, x17\n"
"ldr w17, [sp, #284]\n" // 4-byte Folded Reload
"cmp w17, #0\n" // =0
"b.gt " DC_KERNEL_NO_MULT_12 "f\n"
"b " DC_KERNEL_NO_MULT_6 "b\n"
DC_KERNEL_NO_MULT_11 ":\n" // in Loop: Header=BB225_7 Depth=2
"ldr x12, [sp, #80]\n" // 8-byte Folded Reload
"add x12, x12, x16, lsl #2\n"
"ldr w17, [sp, #284]\n" // 4-byte Folded Reload
"cmp w17, #0\n" // =0
"b.le " DC_KERNEL_NO_MULT_6 "b\n"
DC_KERNEL_NO_MULT_12 ":\n" // in Loop: Header=BB225_7 Depth=2
"ldr w17, [sp, #284]\n" // 4-byte Folded Reload
"movi v28.16b, #0\n"
"movi v29.16b, #0\n"
"movi v30.16b, #0\n"
"cmp w17, #3\n" // =3
"movi v11.16b, #0\n"
"movi v12.16b, #0\n"
"movi v13.16b, #0\n"
"b.lt " DC_KERNEL_NO_MULT_14 "f\n"
// %bb.13: // in Loop: Header=BB225_7 Depth=2
"add x17, %[output_block_data], #32\n" // =32
"ldr %[output_block_data], [sp, #264]\n" // 8-byte Folded Reload
"ldr q13, [x17]\n"
"ldr q12, [x17, %[output_block_data]]\n"
"ldr %[output_block_data], [sp, #256]\n" // 8-byte Folded Reload
"ldr q11, [x17, %[output_block_data]]\n"
"ldr %[output_block_data], [sp, #224]\n" // 8-byte Folded Reload
"ldr q30, [x17, %[output_block_data]]\n"
"ldr %[output_block_data], [sp, #216]\n" // 8-byte Folded Reload
"ldr q29, [x17, %[output_block_data]]\n"
"ldr %[output_block_data], [sp, #208]\n" // 8-byte Folded Reload
"ldr q28, [x17, %[output_block_data]]\n"
DC_KERNEL_NO_MULT_14 ":\n" // in Loop: Header=BB225_7 Depth=2
"ldr w17, [sp, #284]\n" // 4-byte Folded Reload
DC_KERNEL_NO_MULT_15 ":\n" // Parent Loop BB225_4 Depth=1
// Parent Loop BB225_7 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4e96965f // sdot v31.4s, v18.16b, v22.16b\n"
".word 0x4e9b9648 // sdot v8.4s, v18.16b, v27.16b\n"
".word 0x4e999669 // sdot v9.4s, v19.16b, v25.16b\n"
".word 0x4e9b967f // sdot v31.4s, v19.16b, v27.16b\n"
".word 0x4e98966a // sdot v10.4s, v19.16b, v24.16b\n"
".word 0x4e999688 // sdot v8.4s, v20.16b, v25.16b\n"
".word 0x4e989689 // sdot v9.4s, v20.16b, v24.16b\n"
"sqrdmulh v3.4s, v31.4s, v1.4s\n"
".word 0x4e97968a // sdot v10.4s, v20.16b, v23.16b\n"
"sqrdmulh v4.4s, v8.4s, v1.4s\n"
"sqrdmulh v5.4s, v9.4s, v1.4s\n"
"sqrshl v3.4s, v3.4s, v2.4s\n"
"sqrdmulh v6.4s, v10.4s, v1.4s\n"
"sqrshl v4.4s, v4.4s, v2.4s\n"
"sqrshl v5.4s, v5.4s, v2.4s\n"
"sqxtn v3.4h, v3.4s\n"
"sqrshl v6.4s, v6.4s, v2.4s\n"
"sqxtn v5.4h, v5.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqxtn2 v5.8h, v6.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqadd v4.8h, v5.8h, v0.8h\n"
"sqxtun v3.8b, v3.8h\n"
"sqxtun2 v3.16b, v4.8h\n"
"umax v3.16b, v3.16b, v7.16b\n"
"add %[output_block_data], x12, x21\n"
"umin v3.16b, v3.16b, v16.16b\n"
"ushr v26.4s, v26.4s, #8\n"
"ushr v25.4s, v25.4s, #8\n"
"str s3, [x12]\n"
"st1 { v3.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x12, x28\n"
"ushr v22.4s, v22.4s, #8\n"
"ushr v27.4s, v27.4s, #8\n"
"sli v26.4s, v11.4s, #24\n"
"ushr v24.4s, v24.4s, #8\n"
"ushr v23.4s, v23.4s, #8\n"
"sli v25.4s, v30.4s, #24\n"
"mov v31.16b, v21.16b\n"
"mov v8.16b, v21.16b\n"
"mov v9.16b, v21.16b\n"
"mov v10.16b, v21.16b\n"
"st1 { v3.s }[2], [%[output_block_data]]\n"
"add %[output_block_data], x12, x27\n"
"subs w17, w17, #1\n" // =1
"sli v22.4s, v13.4s, #24\n"
"ushr v13.4s, v13.4s, #8\n"
"ushr v11.4s, v11.4s, #8\n"
"sli v27.4s, v12.4s, #24\n"
"ushr v12.4s, v12.4s, #8\n"
"ushr v30.4s, v30.4s, #8\n"
"sli v24.4s, v29.4s, #24\n"
"ushr v29.4s, v29.4s, #8\n"
"sli v23.4s, v28.4s, #24\n"
"ushr v28.4s, v28.4s, #8\n"
".word 0x4e9a969f // sdot v31.4s, v20.16b, v26.16b\n"
".word 0x4e9a9668 // sdot v8.4s, v19.16b, v26.16b\n"
".word 0x4e9a9649 // sdot v9.4s, v18.16b, v26.16b\n"
"add x12, x12, x5\n"
".word 0x4e99964a // sdot v10.4s, v18.16b, v25.16b\n"
"st1 { v3.s }[3], [%[output_block_data]]\n"
"b.ne " DC_KERNEL_NO_MULT_15 "b\n"
"b " DC_KERNEL_NO_MULT_6 "b\n"
DC_KERNEL_NO_MULT_16 ":\n" // in Loop: Header=BB225_4 Depth=1
"cmp w17, #1\n" // =1
"add x16, %[bias_data], #32\n" // =32
"b.lt " DC_KERNEL_NO_MULT_2 "b\n"
// %bb.17: // in Loop: Header=BB225_4 Depth=1
"ldr w23, [sp, #276]\n" // 4-byte Folded Reload
"cmp w23, #1\n" // =1
"b.lt " DC_KERNEL_NO_MULT_29 "f\n"
// %bb.18: // in Loop: Header=BB225_4 Depth=1
"str x16, [sp, #192]\n" // 8-byte Folded Spill
"ldp q21, q22, [%[bias_data]]\n"
"ldr x17, [sp, #184]\n" // 8-byte Folded Reload
"ldr x12, [sp, #80]\n" // 8-byte Folded Reload
"ldr x23, [sp, #248]\n" // 8-byte Folded Reload
"mov w24, wzr\n"
"b " DC_KERNEL_NO_MULT_20 "f\n"
DC_KERNEL_NO_MULT_19 ":\n" // in Loop: Header=BB225_20 Depth=2
"ldr w12, [sp, #76]\n" // 4-byte Folded Reload
"add w24, w24, #1\n" // =1
"ldr x21, [sp, #136]\n" // 8-byte Folded Reload
"ldr x17, [sp, #200]\n" // 8-byte Folded Reload
"cmp w24, w12\n"
"ldr x12, [sp, #232]\n" // 8-byte Folded Reload
"add x12, x12, x21\n"
"b.eq " DC_KERNEL_NO_MULT_28 "f\n"
DC_KERNEL_NO_MULT_20 ":\n" // Parent Loop BB225_4 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB225_23 Depth 3
// Child Loop BB225_27 Depth 4
"ldr %[output_block_data], [sp, #264]\n" // 8-byte Folded Reload
"ldp q23, q24, [x17]\n"
"mov x21, x12\n"
"mov w12, wzr\n"
"add x16, x17, %[output_block_data]\n"
"ldr %[output_block_data], [sp, #256]\n" // 8-byte Folded Reload
"ldp q25, q26, [x16]\n"
"str x16, [sp, #200]\n" // 8-byte Folded Spill
"add %[output_block_data], x17, x3\n"
"ldp q27, q28, [%[output_block_data]]\n"
"str x21, [sp, #232]\n" // 8-byte Folded Spill
"b " DC_KERNEL_NO_MULT_23 "f\n"
DC_KERNEL_NO_MULT_21 ":\n" // in Loop: Header=BB225_23 Depth=3
"mov %[filter_workspace], x26\n"
DC_KERNEL_NO_MULT_22 ":\n" // in Loop: Header=BB225_23 Depth=3
"ldr w17, [sp, #276]\n" // 4-byte Folded Reload
"add w12, w12, #1\n" // =1
"cmp w12, w17\n"
"mov x17, x16\n"
"b.eq " DC_KERNEL_NO_MULT_19 "b\n"
DC_KERNEL_NO_MULT_23 ":\n" // Parent Loop BB225_4 Depth=1
// Parent Loop BB225_20 Depth=2
// => This Loop Header: Depth=3
// Child Loop BB225_27 Depth 4
"mov x26, %[filter_workspace]\n"
"ldr w1, [sp, #280]\n" // 4-byte Folded Reload
"ldr w3, [sp, #284]\n" // 4-byte Folded Reload
"add x16, x17, #32\n" // =32
"cmp w12, w1\n"
"mov w1, #4\n"
"csel w3, w3, w1, eq\n"
"cmp w3, #3\n" // =3
"b.ge " DC_KERNEL_NO_MULT_25 "f\n"
// %bb.24: // in Loop: Header=BB225_23 Depth=3
"movi v29.16b, #0\n"
"cmp w3, #1\n" // =1
"movi v30.16b, #0\n"
"movi v31.16b, #0\n"
"movi v9.16b, #0\n"
"movi v10.16b, #0\n"
"movi v8.16b, #0\n"
"b.ge " DC_KERNEL_NO_MULT_26 "f\n"
"b " DC_KERNEL_NO_MULT_21 "b\n"
DC_KERNEL_NO_MULT_25 ":\n" // in Loop: Header=BB225_23 Depth=3
"ldr x23, [sp, #264]\n" // 8-byte Folded Reload
"mov %[filter_workspace], x22\n"
"mov x22, x15\n"
"mov x15, x14\n"
"add x23, x16, x23\n"
"mov x14, x13\n"
"mov x13, x20\n"
"mov x20, x16\n"
"mov x16, x25\n"
"ldr x25, [sp, #256]\n" // 8-byte Folded Reload
"ldp q8, q31, [x17, #32]\n"
"ldp q10, q30, [x23]\n"
"ldp x6, x23, [sp, #240]\n" // 16-byte Folded Reload
"add x25, x20, x25\n"
"ldp q9, q29, [x25]\n"
"mov x25, x16\n"
"mov x16, x20\n"
"mov x20, x13\n"
"mov x13, x14\n"
"mov x14, x15\n"
"mov x15, x22\n"
"mov x22, %[filter_workspace]\n"
"mov %[bias_data], x7\n"
DC_KERNEL_NO_MULT_26 ":\n" // in Loop: Header=BB225_23 Depth=3
"mov %[filter_workspace], x26\n"
DC_KERNEL_NO_MULT_27 ":\n" // Parent Loop BB225_4 Depth=1
// Parent Loop BB225_20 Depth=2
// Parent Loop BB225_23 Depth=3
// => This Inner Loop Header: Depth=4
"mov v3.16b, v21.16b\n"
"mov v4.16b, v22.16b\n"
".word 0x4e979643 // sdot v3.4s, v18.16b, v23.16b\n"
".word 0x4e9895e4 // sdot v4.4s, v15.16b, v24.16b\n"
".word 0x4e999663 // sdot v3.4s, v19.16b, v25.16b\n"
".word 0x4e9a94a4 // sdot v4.4s, v5.16b, v26.16b\n"
".word 0x4e9b9683 // sdot v3.4s, v20.16b, v27.16b\n"
".word 0x4e9c94c4 // sdot v4.4s, v6.16b, v28.16b\n"
"sqrdmulh v3.4s, v3.4s, v1.4s\n"
"sqrdmulh v4.4s, v4.4s, v1.4s\n"
"sqrshl v3.4s, v3.4s, v2.4s\n"
"sqrshl v4.4s, v4.4s, v2.4s\n"
"sqxtn v3.4h, v3.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqxtun v3.8b, v3.8h\n"
"umax v3.8b, v3.8b, v17.8b\n"
"ushr v23.4s, v23.4s, #8\n"
"ushr v24.4s, v24.4s, #8\n"
"ushr v25.4s, v25.4s, #8\n"
"ushr v26.4s, v26.4s, #8\n"
"ushr v27.4s, v27.4s, #8\n"
"ushr v28.4s, v28.4s, #8\n"
"umin v3.8b, v3.8b, v14.8b\n"
"subs w3, w3, #1\n" // =1
"sli v23.4s, v8.4s, #24\n"
"ushr v8.4s, v8.4s, #8\n"
"sli v24.4s, v31.4s, #24\n"
"ushr v31.4s, v31.4s, #8\n"
"sli v25.4s, v10.4s, #24\n"
"ushr v10.4s, v10.4s, #8\n"
"sli v26.4s, v30.4s, #24\n"
"ushr v30.4s, v30.4s, #8\n"
"sli v27.4s, v9.4s, #24\n"
"ushr v9.4s, v9.4s, #8\n"
"sli v28.4s, v29.4s, #24\n"
"ushr v29.4s, v29.4s, #8\n"
"str d3, [x21]\n"
"add x21, x21, x5\n"
"b.ne " DC_KERNEL_NO_MULT_27 "b\n"
"b " DC_KERNEL_NO_MULT_22 "b\n"
DC_KERNEL_NO_MULT_28 ":\n" // in Loop: Header=BB225_4 Depth=1
"ldr %[bias_data], [sp, #192]\n" // 8-byte Folded Reload
"ldr x26, [sp, #16]\n" // 8-byte Folded Reload
"b " DC_KERNEL_NO_MULT_3 "b\n"
DC_KERNEL_NO_MULT_29 ":\n" // in Loop: Header=BB225_4 Depth=1
"ldr w12, [sp, #12]\n" // 4-byte Folded Reload
"cmp w17, #2\n" // =2
"b.hs " DC_KERNEL_NO_MULT_31 "f\n"
// %bb.30: // in Loop: Header=BB225_4 Depth=1
"ldr x23, [sp, #248]\n" // 8-byte Folded Reload
"mov w12, wzr\n"
"b " DC_KERNEL_NO_MULT_33 "f\n"
DC_KERNEL_NO_MULT_31 ":\n" // Parent Loop BB225_4 Depth=1
// => This Inner Loop Header: Depth=2
"subs w12, w12, #2\n" // =2
"b.ne " DC_KERNEL_NO_MULT_31 "b\n"
// %bb.32: // in Loop: Header=BB225_4 Depth=1
"ldr w12, [sp, #12]\n" // 4-byte Folded Reload
"ldr x23, [sp, #248]\n" // 8-byte Folded Reload
"cmp w17, w12\n"
"b.eq " DC_KERNEL_NO_MULT_2 "b\n"
DC_KERNEL_NO_MULT_33 ":\n" // in Loop: Header=BB225_4 Depth=1
"sub w12, w17, w12\n"
DC_KERNEL_NO_MULT_34 ":\n" // Parent Loop BB225_4 Depth=1
// => This Inner Loop Header: Depth=2
"subs w12, w12, #1\n" // =1
"b.ne " DC_KERNEL_NO_MULT_34 "b\n"
"b " DC_KERNEL_NO_MULT_2 "b\n"
DC_KERNEL_NO_MULT_35 ":\n"
// Compiled intrinsics total stack 464, now 320 for spillage only.
"add sp, sp, #320\n" // =464
:
// Outputs.
[ scratch_block_data ] "+r"(scratch_block_data),
[ filter_workspace ] "+r"(filter_workspace),
[ bias_data ] "+r"(bias_data),
[ output_block_data ] "+r"(output_block_data)
:
// Inputs.
[ function_params ] "r"(function_params)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
"v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
"v31",
// We use these general-purpose registers.
"x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"x27", "x28");
#undef DC_KERNEL_NO_MULT_1
#undef DC_KERNEL_NO_MULT_2
#undef DC_KERNEL_NO_MULT_3
#undef DC_KERNEL_NO_MULT_4
#undef DC_KERNEL_NO_MULT_5
#undef DC_KERNEL_NO_MULT_6
#undef DC_KERNEL_NO_MULT_7
#undef DC_KERNEL_NO_MULT_8
#undef DC_KERNEL_NO_MULT_9
#undef DC_KERNEL_NO_MULT_10
#undef DC_KERNEL_NO_MULT_11
#undef DC_KERNEL_NO_MULT_12
#undef DC_KERNEL_NO_MULT_13
#undef DC_KERNEL_NO_MULT_14
#undef DC_KERNEL_NO_MULT_15
#undef DC_KERNEL_NO_MULT_16
#undef DC_KERNEL_NO_MULT_17
#undef DC_KERNEL_NO_MULT_18
#undef DC_KERNEL_NO_MULT_19
#undef DC_KERNEL_NO_MULT_20
#undef DC_KERNEL_NO_MULT_21
#undef DC_KERNEL_NO_MULT_22
#undef DC_KERNEL_NO_MULT_23
#undef DC_KERNEL_NO_MULT_24
#undef DC_KERNEL_NO_MULT_25
#undef DC_KERNEL_NO_MULT_26
#undef DC_KERNEL_NO_MULT_27
#undef DC_KERNEL_NO_MULT_28
#undef DC_KERNEL_NO_MULT_29
#undef DC_KERNEL_NO_MULT_30
#undef DC_KERNEL_NO_MULT_31
#undef DC_KERNEL_NO_MULT_32
#undef DC_KERNEL_NO_MULT_33
#undef DC_KERNEL_NO_MULT_34
#undef DC_KERNEL_NO_MULT_35
} // NOLINT(readability/fn_size) Manually unrolled.
  // Public entry point for this kernel specialization.  Forwards all
  // arguments unchanged to the inline-assembly implementation above.
  // Marked noinline — presumably to keep the very large asm body from
  // being duplicated at each call site (TODO confirm intent).
  static void __attribute__((noinline))
  Run(const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, uint8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
                         output_block_data, function_params);
  }
};
// Macro-block kernel specialization: 3x3 depthwise convolution for
// uint8 activations with non-per-channel quantization, no depth
// multiplication, and stride 2.  The body is hand-scheduled AArch64
// inline assembly built around the ARMv8.2 dot-product (SDOT)
// instructions, which appear below as ".word 0x4e..." encodings with a
// disassembly comment on each line.  The requantization pipeline is the
// visible sequence sqrdmulh (output multiplier, v3) -> sqrshl (output
// shift, v4) -> sqxtn/sqxtn2 (narrow to 16-bit) -> sqadd (output
// offset, v0) -> sqxtun (narrow to unsigned 8-bit) -> umax/umin
// (activation min v1 / max v2 clamp).
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                        QuantizationType::kNonPerChannelUint8,
                        DepthwiseConvDepthMultiplication::kNoMultiplication,
                        /*stride=*/2> {
  // Processes one macro block of input.  All real work happens inside a
  // single asm volatile statement; the C++ level only supplies the five
  // pointer/struct operands and declares the clobbered registers.
  static inline void KernelMacroBlockNeon(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, uint8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Note that argument registers may be reused after parameter loading.
    // x0 %[scratch_block_data]
    // x1 %[filter_workspace]
    // x2 %[bias_data]
    // x3 %[output_block_data]
    // x4 %[function_params]
    //
    // The macros below name the numeric local labels used inside the asm
    // body; branches reference them with an "f" (forward) or "b"
    // (backward) suffix, e.g. "b.ne 13b".  They are #undef'd after the
    // asm statement.
#define DC_KERNEL_NO_MULT_STRIDE_1 "1"
#define DC_KERNEL_NO_MULT_STRIDE_2 "2"
#define DC_KERNEL_NO_MULT_STRIDE_3 "3"
#define DC_KERNEL_NO_MULT_STRIDE_4 "4"
#define DC_KERNEL_NO_MULT_STRIDE_5 "5"
#define DC_KERNEL_NO_MULT_STRIDE_6 "6"
#define DC_KERNEL_NO_MULT_STRIDE_7 "7"
#define DC_KERNEL_NO_MULT_STRIDE_8 "8"
#define DC_KERNEL_NO_MULT_STRIDE_9 "9"
#define DC_KERNEL_NO_MULT_STRIDE_10 "10"
#define DC_KERNEL_NO_MULT_STRIDE_11 "11"
#define DC_KERNEL_NO_MULT_STRIDE_12 "12"
#define DC_KERNEL_NO_MULT_STRIDE_13 "13"
#define DC_KERNEL_NO_MULT_STRIDE_14 "14"
#define DC_KERNEL_NO_MULT_STRIDE_15 "15"
#define DC_KERNEL_NO_MULT_STRIDE_16 "16"
#define DC_KERNEL_NO_MULT_STRIDE_17 "17"
#define DC_KERNEL_NO_MULT_STRIDE_18 "18"
#define DC_KERNEL_NO_MULT_STRIDE_19 "19"
#define DC_KERNEL_NO_MULT_STRIDE_20 "20"
#define DC_KERNEL_NO_MULT_STRIDE_21 "21"
#define DC_KERNEL_NO_MULT_STRIDE_22 "22"
#define DC_KERNEL_NO_MULT_STRIDE_23 "23"
#define DC_KERNEL_NO_MULT_STRIDE_24 "24"
#define DC_KERNEL_NO_MULT_STRIDE_25 "25"
#define DC_KERNEL_NO_MULT_STRIDE_26 "26"
#define DC_KERNEL_NO_MULT_STRIDE_27 "27"
#define DC_KERNEL_NO_MULT_STRIDE_28 "28"
#define DC_KERNEL_NO_MULT_STRIDE_29 "29"
#define DC_KERNEL_NO_MULT_STRIDE_30 "30"
#define DC_KERNEL_NO_MULT_STRIDE_31 "31"
#define DC_KERNEL_NO_MULT_STRIDE_32 "32"
#define DC_KERNEL_NO_MULT_STRIDE_33 "33"
#define DC_KERNEL_NO_MULT_STRIDE_34 "34"
#define DC_KERNEL_NO_MULT_STRIDE_35 "35"
    asm volatile(
        // Compiled code used block of 160 for spill out of total stack of 304.
        "sub sp, sp, #160\n"  // =304
        "stp %[output_block_data], %[filter_workspace], [sp, #144]\n"  // 16-byte Folded Spill
        // Load quantization / geometry parameters from function_params
        // at the DP_OFFSET_* byte offsets.
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
        "cmp w8, #1\n"  // =1
        "b.lt " DC_KERNEL_NO_MULT_STRIDE_35 "f\n"
        // %bb.1:
        "ldr x14, [%[function_params]]\n"
        "ldpsw x11, x12, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
        "ldp w13, w3, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
        "add x15, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "\n"  // =40
        "add x17, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "\n"  // =44
        "add x5, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n"  // =28
        "add x6, %[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT) "\n"  // =36
        "add x7, %[function_params], #" STR(DP_OFFSET_OUTPUT_MULTIPLIER) "\n"  // =32
        "ldrsw x19, [%[function_params], #" STR(DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
        "ldr w1, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
        "ldp w16, w4, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
        "ld1r { v1.8b }, [x15]\n"  // v1 = quantized_activation_min
        "lsl w15, w14, #1\n"
        "sxtw x20, w15\n"
        "cmp w16, #1\n"  // =1
        "ldr x15, [sp, #144]\n"  // 8-byte Folded Reload
        "ccmp w3, w13, #0, eq\n"
        "ld1r { v0.8h }, [x5]\n"  // v0 = output_offset
        "ld1r { v2.8b }, [x17]\n"  // v2 = quantized_activation_max
        "ld1r { v3.4s }, [x7]\n"  // v3 = output_multiplier
        "ld1r { v4.4s }, [x6]\n"  // v4 = output_shift
        "csel w23, w3, w13, lt\n"
        "sxtw x6, w14\n"
        "bic w14, w23, w23, asr #31\n"
        "lsl x5, x12, #1\n"
        "madd x15, x20, x14, x15\n"
        "sub x14, x13, x14\n"
        "mov x9, xzr\n"
        "mov x10, xzr\n"
        "str w4, [sp, #84]\n"  // 4-byte Folded Spill
        "lsl %[function_params], x19, #5\n"
        "lsl x7, x12, #2\n"
        "add x19, x5, x12\n"
        "str x14, [sp, #136]\n"  // 8-byte Folded Spill
        "add x14, x15, #4\n"  // =4
        "str %[output_block_data], [sp, #72]\n"  // 8-byte Folded Spill
        "str x15, [sp, #88]\n"  // 8-byte Folded Spill
        "str x14, [sp, #8]\n"  // 8-byte Folded Spill
        // implicit-def: $q16
        // implicit-def: $q7
        // implicit-def: $q22
        // implicit-def: $q18
        // implicit-def: $q17
        // implicit-def: $q6
        // implicit-def: $q11
        // implicit-def: $q13
        // implicit-def: $q14
        // implicit-def: $q15
        // implicit-def: $q20
        "b " DC_KERNEL_NO_MULT_STRIDE_4 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_2 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "add x25, %[bias_data], #32\n"  // =32
        "mov v22.16b, v12.16b\n"
        DC_KERNEL_NO_MULT_STRIDE_3 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "add x10, x10, #1\n"  // =1
        "cmp x10, x8\n"
        "add x9, x9, #8\n"  // =8
        "mov %[bias_data], x25\n"
        "b.eq " DC_KERNEL_NO_MULT_STRIDE_35 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_4 ":\n"  // =>This Loop Header: Depth=1
        // Child Loop BB227_30 Depth 2
        // Child Loop BB227_22 Depth 2
        // Child Loop BB227_7 Depth 2
        // Child Loop BB227_10 Depth 2
        // Child Loop BB227_13 Depth 2
        // Child Loop BB227_26 Depth 2
        "ldr x15, [sp, #152]\n"  // 8-byte Folded Reload
        "add w14, w10, w10, lsl #1\n"
        "lsl w14, w14, #5\n"
        "cmp w1, #2\n"  // =2
        "add x27, x15, x14\n"
        "madd x26, x10, %[function_params], %[scratch_block_data]\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_15 "f\n"
        // %bb.5: // in Loop: Header=BB227_4 Depth=1
        "ubfx x14, x9, #3, #29\n"
        "lsl x25, x14, #3\n"
        "ldr x14, [sp, #88]\n"  // 8-byte Folded Reload
        "ldr q24, [x27]\n"
        "ldr q25, [x27, #32]\n"
        "ldr q26, [x27, #64]\n"
        "add x24, x14, x25\n"
        "ldr x14, [sp, #144]\n"  // 8-byte Folded Reload
        "ldr q27, [%[bias_data]]\n"
        "ldr q31, [x26]\n"
        "ldr q8, [x26, x12]\n"
        "ldr q30, [x26, x5]\n"
        "ldr q29, [x26, x19]\n"
        "ldr q28, [x26, x7]\n"
        "lsl w15, w10, #3\n"
        "cmp w23, #1\n"  // =1
        "add x28, x14, x15\n"
        "mov v12.16b, v22.16b\n"
        "mov w14, wzr\n"
        "b.lt " DC_KERNEL_NO_MULT_STRIDE_9 "f\n"
        // %bb.6: // in Loop: Header=BB227_4 Depth=1
        "mov x17, xzr\n"
        "add x22, x26, #32\n"  // =32
        "mov x21, x23\n"
        "mov v19.16b, v30.16b\n"
        DC_KERNEL_NO_MULT_STRIDE_7 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v20.16b, v27.16b\n"
        "mov v21.16b, v27.16b\n"
        ".word 0x4e9f9714  // sdot   v20.4s, v24.16b, v31.16b\n"
        ".word 0x4e939715  // sdot   v21.4s, v24.16b, v19.16b\n"
        ".word 0x4e889734  // sdot   v20.4s, v25.16b, v8.16b\n"
        ".word 0x4e9d9735  // sdot   v21.4s, v25.16b, v29.16b\n"
        ".word 0x4e939754  // sdot   v20.4s, v26.16b, v19.16b\n"
        ".word 0x4e9c9755  // sdot   v21.4s, v26.16b, v28.16b\n"
        "sqrdmulh v20.4s, v20.4s, v3.4s\n"
        "and %[output_block_data], x17, #0xffffffe0\n"
        "sqrdmulh v21.4s, v21.4s, v3.4s\n"
        "sqrshl v20.4s, v20.4s, v4.4s\n"
        "add %[output_block_data], x22, x3\n"
        "sqrshl v21.4s, v21.4s, v4.4s\n"
        "sqxtn v20.4h, v20.4s\n"
        "rev32 v22.8h, v31.8h\n"
        "rev32 v23.8h, v8.8h\n"
        "rev32 v9.8h, v30.8h\n"
        "rev32 v10.8h, v29.8h\n"
        "ldr q31, [%[output_block_data]]\n"
        "ldr q8, [%[output_block_data], x12]\n"
        "ldr q30, [%[output_block_data], x5]\n"
        "ldr q29, [%[output_block_data], x19]\n"
        "rev32 v19.8h, v28.8h\n"
        "ldr q28, [%[output_block_data], x7]\n"
        "sqxtn2 v20.8h, v21.4s\n"
        "sqadd v20.8h, v20.8h, v0.8h\n"
        "sqxtun v20.8b, v20.8h\n"
        "add x15, x28, w14, sxtw\n"
        "umax v20.8b, v20.8b, v1.8b\n"
        "add %[output_block_data], x15, x11\n"
        "umin v20.8b, v20.8b, v2.8b\n"
        "mov v11.16b, v27.16b\n"
        "str s20, [x15]\n"
        "st1 { v20.s }[1], [%[output_block_data]]\n"
        "trn1 v20.8h, v22.8h, v31.8h\n"
        "mov v21.16b, v27.16b\n"
        "trn1 v22.8h, v23.8h, v8.8h\n"
        "trn1 v23.8h, v9.8h, v30.8h\n"
        ".word 0x4e94970b  // sdot   v11.4s, v24.16b, v20.16b\n"
        "trn1 v9.8h, v10.8h, v29.8h\n"
        ".word 0x4e979715  // sdot   v21.4s, v24.16b, v23.16b\n"
        ".word 0x4e96972b  // sdot   v11.4s, v25.16b, v22.16b\n"
        "trn1 v19.8h, v19.8h, v28.8h\n"
        ".word 0x4e899735  // sdot   v21.4s, v25.16b, v9.16b\n"
        ".word 0x4e97974b  // sdot   v11.4s, v26.16b, v23.16b\n"
        ".word 0x4e939755  // sdot   v21.4s, v26.16b, v19.16b\n"
        "sqrdmulh v19.4s, v11.4s, v3.4s\n"
        "sqrdmulh v20.4s, v21.4s, v3.4s\n"
        "sqrshl v19.4s, v19.4s, v4.4s\n"
        "sqrshl v20.4s, v20.4s, v4.4s\n"
        "sqxtn v19.4h, v19.4s\n"
        "sqxtn2 v19.8h, v20.4s\n"
        "sqadd v19.8h, v19.8h, v0.8h\n"
        "sqxtun v19.8b, v19.8h\n"
        "add x15, x15, x6\n"
        "umax v19.8b, v19.8b, v1.8b\n"
        "add %[output_block_data], x15, x11\n"
        "umin v19.8b, v19.8b, v2.8b\n"
        "add x17, x17, #32\n"  // =32
        "subs x21, x21, #1\n"  // =1
        "str s19, [x15]\n"
        "st1 { v19.s }[1], [%[output_block_data]]\n"
        "add w14, w14, w20\n"
        "mov v19.16b, v30.16b\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_7 "b\n"
        // %bb.8: // in Loop: Header=BB227_4 Depth=1
        "mov v20.16b, v31.16b\n"
        "mov v15.16b, v8.16b\n"
        "mov v14.16b, v30.16b\n"
        "mov v13.16b, v29.16b\n"
        "mov v11.16b, v28.16b\n"
        "mov w14, w23\n"
        DC_KERNEL_NO_MULT_STRIDE_9 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "cmp w14, w13\n"
        "ldr x14, [sp, #136]\n"  // 8-byte Folded Reload
        "b.ge " DC_KERNEL_NO_MULT_STRIDE_11 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_10 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v9.16b, v27.16b\n"
        "mov v10.16b, v27.16b\n"
        ".word 0x4e9f9709  // sdot   v9.4s, v24.16b, v31.16b\n"
        ".word 0x4e889729  // sdot   v9.4s, v25.16b, v8.16b\n"
        ".word 0x4e9e970a  // sdot   v10.4s, v24.16b, v30.16b\n"
        ".word 0x4e9e9749  // sdot   v9.4s, v26.16b, v30.16b\n"
        ".word 0x4e9d972a  // sdot   v10.4s, v25.16b, v29.16b\n"
        ".word 0x4e9c974a  // sdot   v10.4s, v26.16b, v28.16b\n"
        "sqrdmulh v9.4s, v9.4s, v3.4s\n"
        "sqrdmulh v10.4s, v10.4s, v3.4s\n"
        "sqrshl v9.4s, v9.4s, v4.4s\n"
        "sqrshl v10.4s, v10.4s, v4.4s\n"
        "sqxtn v9.4h, v9.4s\n"
        "sqxtn2 v9.8h, v10.4s\n"
        "sqadd v9.8h, v9.8h, v0.8h\n"
        "sqxtun v9.8b, v9.8h\n"
        "umax v9.8b, v9.8b, v1.8b\n"
        "rev32 v31.8h, v31.8h\n"
        "rev32 v8.8h, v8.8h\n"
        "rev32 v30.8h, v30.8h\n"
        "rev32 v29.8h, v29.8h\n"
        "rev32 v28.8h, v28.8h\n"
        "umin v9.8b, v9.8b, v2.8b\n"
        "add x15, x24, x11\n"
        "subs x14, x14, #1\n"  // =1
        "trn1 v31.8h, v31.8h, v20.8h\n"
        "trn1 v8.8h, v8.8h, v15.8h\n"
        "trn1 v29.8h, v29.8h, v13.8h\n"
        "trn1 v30.8h, v30.8h, v14.8h\n"
        "trn1 v28.8h, v28.8h, v11.8h\n"
        "str s9, [x24]\n"
        "add x24, x24, x20\n"
        "st1 { v9.s }[1], [x15]\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_10 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_11 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "ldr q24, [x27, #16]\n"
        "ldr q25, [x27, #48]\n"
        "ldr q26, [x27, #80]\n"
        "ldr q30, [x26, #16]!\n"
        "ldr q27, [%[bias_data], #16]\n"
        "cmp w23, #0\n"  // =0
        "ldr q8, [x26, x12]\n"
        "ldr q31, [x26, x5]\n"
        "ldr q29, [x26, x19]\n"
        "ldr q28, [x26, x7]\n"
        "b.le " DC_KERNEL_NO_MULT_STRIDE_24 "f\n"
        // %bb.12: // in Loop: Header=BB227_4 Depth=1
        "mov w14, wzr\n"
        "mov x17, xzr\n"
        "add x22, x26, #32\n"  // =32
        "add x24, x28, #4\n"  // =4
        "mov x21, x23\n"
        "mov v19.16b, v31.16b\n"
        DC_KERNEL_NO_MULT_STRIDE_13 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v5.16b, v27.16b\n"
        "mov v20.16b, v27.16b\n"
        ".word 0x4e9e9705  // sdot   v5.4s, v24.16b, v30.16b\n"
        ".word 0x4e939714  // sdot   v20.4s, v24.16b, v19.16b\n"
        ".word 0x4e889725  // sdot   v5.4s, v25.16b, v8.16b\n"
        ".word 0x4e9d9734  // sdot   v20.4s, v25.16b, v29.16b\n"
        ".word 0x4e939745  // sdot   v5.4s, v26.16b, v19.16b\n"
        ".word 0x4e9c9754  // sdot   v20.4s, v26.16b, v28.16b\n"
        "sqrdmulh v5.4s, v5.4s, v3.4s\n"
        "and %[output_block_data], x17, #0xffffffe0\n"
        "sqrdmulh v20.4s, v20.4s, v3.4s\n"
        "sqrshl v5.4s, v5.4s, v4.4s\n"
        "add %[output_block_data], x22, x3\n"
        "sqrshl v20.4s, v20.4s, v4.4s\n"
        "sqxtn v5.4h, v5.4s\n"
        "rev32 v21.8h, v30.8h\n"
        "rev32 v22.8h, v8.8h\n"
        "rev32 v23.8h, v31.8h\n"
        "rev32 v9.8h, v29.8h\n"
        "ldr q30, [%[output_block_data]]\n"
        "ldr q8, [%[output_block_data], x12]\n"
        "ldr q31, [%[output_block_data], x5]\n"
        "ldr q29, [%[output_block_data], x19]\n"
        "rev32 v19.8h, v28.8h\n"
        "ldr q28, [%[output_block_data], x7]\n"
        "sqxtn2 v5.8h, v20.4s\n"
        "sqadd v5.8h, v5.8h, v0.8h\n"
        "sqxtun v5.8b, v5.8h\n"
        "add x15, x24, w14, sxtw\n"
        "umax v5.8b, v5.8b, v1.8b\n"
        "add %[output_block_data], x15, x11\n"
        "umin v5.8b, v5.8b, v2.8b\n"
        "mov v10.16b, v27.16b\n"
        "str s5, [x15]\n"
        "st1 { v5.s }[1], [%[output_block_data]]\n"
        "trn1 v5.8h, v21.8h, v30.8h\n"
        "mov v20.16b, v27.16b\n"
        "trn1 v21.8h, v22.8h, v8.8h\n"
        "trn1 v22.8h, v23.8h, v31.8h\n"
        ".word 0x4e85970a  // sdot   v10.4s, v24.16b, v5.16b\n"
        "trn1 v23.8h, v9.8h, v29.8h\n"
        ".word 0x4e969714  // sdot   v20.4s, v24.16b, v22.16b\n"
        ".word 0x4e95972a  // sdot   v10.4s, v25.16b, v21.16b\n"
        "trn1 v19.8h, v19.8h, v28.8h\n"
        ".word 0x4e979734  // sdot   v20.4s, v25.16b, v23.16b\n"
        ".word 0x4e96974a  // sdot   v10.4s, v26.16b, v22.16b\n"
        ".word 0x4e939754  // sdot   v20.4s, v26.16b, v19.16b\n"
        "sqrdmulh v5.4s, v10.4s, v3.4s\n"
        "sqrdmulh v19.4s, v20.4s, v3.4s\n"
        "sqrshl v5.4s, v5.4s, v4.4s\n"
        "sqrshl v19.4s, v19.4s, v4.4s\n"
        "sqxtn v5.4h, v5.4s\n"
        "sqxtn2 v5.8h, v19.4s\n"
        "sqadd v5.8h, v5.8h, v0.8h\n"
        "sqxtun v5.8b, v5.8h\n"
        "add x15, x15, x6\n"
        "umax v5.8b, v5.8b, v1.8b\n"
        "add x17, x17, #32\n"  // =32
        "subs x21, x21, #1\n"  // =1
        "add %[output_block_data], x15, x11\n"
        "umin v5.8b, v5.8b, v2.8b\n"
        "add w14, w14, w20\n"
        "mov v19.16b, v31.16b\n"
        "str s5, [x15]\n"
        "st1 { v5.s }[1], [%[output_block_data]]\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_13 "b\n"
        // %bb.14: // in Loop: Header=BB227_4 Depth=1
        "mov v20.16b, v30.16b\n"
        "mov v15.16b, v8.16b\n"
        "mov v14.16b, v31.16b\n"
        "mov v13.16b, v29.16b\n"
        "mov v11.16b, v28.16b\n"
        "mov w14, w23\n"
        "cmp w14, w13\n"
        "b.ge " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
        "b " DC_KERNEL_NO_MULT_STRIDE_25 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_15 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "cmp w13, #1\n"  // =1
        "add x25, %[bias_data], #32\n"  // =32
        "b.lt " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
        // %bb.16: // in Loop: Header=BB227_4 Depth=1
        "stp q13, q11, [sp, #96]\n"  // 32-byte Folded Spill
        "add x15, x26, x12\n"
        "ldp q9, q10, [x15]\n"
        "ldr x15, [sp, #144]\n"  // 8-byte Folded Reload
        "lsl w14, w10, #3\n"
        "ldp q30, q31, [%[bias_data]]\n"
        "add x17, x26, x5\n"
        "add %[bias_data], x15, x14\n"
        "ldr w14, [sp, #84]\n"  // 4-byte Folded Reload
        "ldp q24, q25, [x27]\n"
        "ldp q26, q27, [x27, #32]\n"
        "ldp q28, q29, [x27, #64]\n"
        "ldp q12, q11, [x26], #32\n"
        "ldp q8, q13, [x17]\n"
        "cmp w13, w14\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_27 "f\n"
        // %bb.17: // in Loop: Header=BB227_4 Depth=1
        "ldr x14, [sp, #72]\n"  // 8-byte Folded Reload
        "mov x24, xzr\n"
        "mov w27, wzr\n"
        "mov x28, x13\n"
        "mov v19.16b, v15.16b\n"
        "mov v5.16b, v14.16b\n"
        "cbnz x14, " DC_KERNEL_NO_MULT_STRIDE_21 "f\n"
        "b " DC_KERNEL_NO_MULT_STRIDE_22 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_18 ":\n"  // in Loop: Header=BB227_22 Depth=2
        "mov v14.16b, v30.16b\n"
        ".word 0x4e8c970e  // sdot   v14.4s, v24.16b, v12.16b\n"
        "mov v12.16b, v31.16b\n"
        ".word 0x4e8b972c  // sdot   v12.4s, v25.16b, v11.16b\n"
        ".word 0x4e89974e  // sdot   v14.4s, v26.16b, v9.16b\n"
        ".word 0x4e8a976c  // sdot   v12.4s, v27.16b, v10.16b\n"
        ".word 0x4e88978e  // sdot   v14.4s, v28.16b, v8.16b\n"
        ".word 0x4e8d97ac  // sdot   v12.4s, v29.16b, v13.16b\n"
        "sqrdmulh v8.4s, v14.4s, v3.4s\n"
        "sqrdmulh v9.4s, v12.4s, v3.4s\n"
        "sqrshl v8.4s, v8.4s, v4.4s\n"
        "sqrshl v9.4s, v9.4s, v4.4s\n"
        "sqxtn v8.4h, v8.4s\n"
        "sqxtn2 v8.8h, v9.4s\n"
        "sqadd v8.8h, v8.8h, v0.8h\n"
        "sqxtun v8.8b, v8.8h\n"
        "umax v8.8b, v8.8b, v1.8b\n"
        "umin v8.8b, v8.8b, v2.8b\n"
        "str d8, [x15, x6]\n"
        "mov v12.16b, v6.16b\n"
        "mov v9.16b, v17.16b\n"
        "mov v8.16b, v18.16b\n"
        "mov v11.16b, v22.16b\n"
        "mov v10.16b, v7.16b\n"
        "mov v13.16b, v16.16b\n"
        DC_KERNEL_NO_MULT_STRIDE_19 ":\n"  // in Loop: Header=BB227_22 Depth=2
        "mov v14.16b, v5.16b\n"
        "mov v15.16b, v19.16b\n"
        "add w27, w27, w20\n"
        "add x24, x24, #32\n"  // =32
        "subs x28, x28, #1\n"  // =1
        "sub x14, x14, #1\n"  // =1
        "b.eq " DC_KERNEL_NO_MULT_STRIDE_33 "f\n"
        // %bb.20: // in Loop: Header=BB227_22 Depth=2
        "mov v19.16b, v15.16b\n"
        "mov v5.16b, v14.16b\n"
        "cbz x14, " DC_KERNEL_NO_MULT_STRIDE_22 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_21 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "and x15, x24, #0xffffffe0\n"
        "add x15, x26, x15\n"
        "add x17, x15, x12\n"
        "add %[output_block_data], x15, x5\n"
        "ldp q6, q22, [x15]\n"
        "ldp q17, q7, [x17]\n"
        "ldp q18, q16, [%[output_block_data]]\n"
        DC_KERNEL_NO_MULT_STRIDE_22 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v14.16b, v30.16b\n"
        "mov v15.16b, v31.16b\n"
        ".word 0x4e8c970e  // sdot   v14.4s, v24.16b, v12.16b\n"
        ".word 0x4e89974e  // sdot   v14.4s, v26.16b, v9.16b\n"
        ".word 0x4e8b972f  // sdot   v15.4s, v25.16b, v11.16b\n"
        ".word 0x4e88978e  // sdot   v14.4s, v28.16b, v8.16b\n"
        ".word 0x4e8a976f  // sdot   v15.4s, v27.16b, v10.16b\n"
        ".word 0x4e8d97af  // sdot   v15.4s, v29.16b, v13.16b\n"
        "sqrdmulh v14.4s, v14.4s, v3.4s\n"
        "sqrdmulh v15.4s, v15.4s, v3.4s\n"
        "sqrshl v14.4s, v14.4s, v4.4s\n"
        "sqrshl v15.4s, v15.4s, v4.4s\n"
        "sqxtn v14.4h, v14.4s\n"
        "sqxtn2 v14.8h, v15.4s\n"
        "sqadd v14.8h, v14.8h, v0.8h\n"
        "sqxtun v14.8b, v14.8h\n"
        "rev32 v12.8h, v12.8h\n"
        "rev32 v9.8h, v9.8h\n"
        "rev32 v8.8h, v8.8h\n"
        "rev32 v11.8h, v11.8h\n"
        "rev32 v10.8h, v10.8h\n"
        "rev32 v13.8h, v13.8h\n"
        "umax v14.8b, v14.8b, v1.8b\n"
        "add x15, %[bias_data], w27, sxtw\n"
        "cmp w16, #1\n"  // =1
        "trn1 v12.8h, v12.8h, v6.8h\n"
        "trn1 v11.8h, v11.8h, v22.8h\n"
        "trn1 v9.8h, v9.8h, v17.8h\n"
        "trn1 v10.8h, v10.8h, v7.8h\n"
        "trn1 v8.8h, v8.8h, v18.8h\n"
        "umin v14.8b, v14.8b, v2.8b\n"
        "trn1 v13.8h, v13.8h, v16.8h\n"
        "str d14, [x15]\n"
        "b.gt " DC_KERNEL_NO_MULT_STRIDE_18 "b\n"
        // %bb.23: // in Loop: Header=BB227_22 Depth=2
        "cbz x14, " DC_KERNEL_NO_MULT_STRIDE_19 "b\n"
        "b " DC_KERNEL_NO_MULT_STRIDE_18 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_24 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "mov w14, wzr\n"
        "cmp w14, w13\n"
        "b.ge " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_25 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "ldr x14, [sp, #8]\n"  // 8-byte Folded Reload
        "ldr x15, [sp, #136]\n"  // 8-byte Folded Reload
        "add x14, x14, x25\n"
        DC_KERNEL_NO_MULT_STRIDE_26 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v5.16b, v27.16b\n"
        "mov v19.16b, v27.16b\n"
        ".word 0x4e9e9705  // sdot   v5.4s, v24.16b, v30.16b\n"
        ".word 0x4e889725  // sdot   v5.4s, v25.16b, v8.16b\n"
        ".word 0x4e9f9713  // sdot   v19.4s, v24.16b, v31.16b\n"
        ".word 0x4e9f9745  // sdot   v5.4s, v26.16b, v31.16b\n"
        ".word 0x4e9d9733  // sdot   v19.4s, v25.16b, v29.16b\n"
        ".word 0x4e9c9753  // sdot   v19.4s, v26.16b, v28.16b\n"
        "sqrdmulh v5.4s, v5.4s, v3.4s\n"
        "sqrdmulh v19.4s, v19.4s, v3.4s\n"
        "sqrshl v5.4s, v5.4s, v4.4s\n"
        "sqrshl v19.4s, v19.4s, v4.4s\n"
        "sqxtn v5.4h, v5.4s\n"
        "sqxtn2 v5.8h, v19.4s\n"
        "sqadd v5.8h, v5.8h, v0.8h\n"
        "sqxtun v5.8b, v5.8h\n"
        "umax v5.8b, v5.8b, v1.8b\n"
        "mov v9.16b, v20.16b\n"
        "rev32 v20.8h, v30.8h\n"
        "rev32 v21.8h, v8.8h\n"
        "rev32 v22.8h, v31.8h\n"
        "rev32 v23.8h, v29.8h\n"
        "rev32 v28.8h, v28.8h\n"
        "umin v5.8b, v5.8b, v2.8b\n"
        "add x17, x14, x11\n"
        "subs x15, x15, #1\n"  // =1
        "trn1 v30.8h, v20.8h, v9.8h\n"
        "mov v20.16b, v9.16b\n"
        "trn1 v8.8h, v21.8h, v15.8h\n"
        "trn1 v29.8h, v23.8h, v13.8h\n"
        "trn1 v31.8h, v22.8h, v14.8h\n"
        "trn1 v28.8h, v28.8h, v11.8h\n"
        "str s5, [x14]\n"
        "add x14, x14, x20\n"
        "st1 { v5.s }[1], [x17]\n"
        "b.ne " DC_KERNEL_NO_MULT_STRIDE_26 "b\n"
        "b " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_27 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "ldr x28, [sp, #72]\n"  // 8-byte Folded Reload
        "mov w14, wzr\n"
        "mov x24, xzr\n"
        "mov x27, x13\n"
        "stp q20, q15, [sp, #16]\n"  // 32-byte Folded Spill
        "str q14, [sp, #48]\n"  // 16-byte Folded Spill
        "b " DC_KERNEL_NO_MULT_STRIDE_30 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_28 ":\n"  // in Loop: Header=BB227_30 Depth=2
        "mov v5.16b, v30.16b\n"
        ".word 0x4e8c9705  // sdot   v5.4s, v24.16b, v12.16b\n"
        "mov v19.16b, v31.16b\n"
        ".word 0x4e8b9733  // sdot   v19.4s, v25.16b, v11.16b\n"
        ".word 0x4e899745  // sdot   v5.4s, v26.16b, v9.16b\n"
        ".word 0x4e8a9773  // sdot   v19.4s, v27.16b, v10.16b\n"
        ".word 0x4e889785  // sdot   v5.4s, v28.16b, v8.16b\n"
        ".word 0x4e8d97b3  // sdot   v19.4s, v29.16b, v13.16b\n"
        "sqrdmulh v5.4s, v5.4s, v3.4s\n"
        "sqrdmulh v19.4s, v19.4s, v3.4s\n"
        "sqrshl v5.4s, v5.4s, v4.4s\n"
        "sqrshl v19.4s, v19.4s, v4.4s\n"
        "sqxtn v5.4h, v5.4s\n"
        "sqxtn2 v5.8h, v19.4s\n"
        "sqadd v5.8h, v5.8h, v0.8h\n"
        "sqxtun v5.8b, v5.8h\n"
        "umax v5.8b, v5.8b, v1.8b\n"
        "umin v5.8b, v5.8b, v2.8b\n"
        "mov v6.16b, v14.16b\n"
        "mov v12.16b, v14.16b\n"
        "mov v9.16b, v17.16b\n"
        "mov v8.16b, v18.16b\n"
        "mov v11.16b, v22.16b\n"
        "mov v10.16b, v7.16b\n"
        "mov v13.16b, v16.16b\n"
        "str d5, [x15, x6]\n"
        DC_KERNEL_NO_MULT_STRIDE_29 ":\n"  // in Loop: Header=BB227_30 Depth=2
        "add x24, x24, #32\n"  // =32
        "sub x28, x28, #1\n"  // =1
        "subs x27, x27, #1\n"  // =1
        "add w14, w14, w20\n"
        "b.eq " DC_KERNEL_NO_MULT_STRIDE_34 "f\n"
        DC_KERNEL_NO_MULT_STRIDE_30 ":\n"  // Parent Loop BB227_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "mov v14.16b, v30.16b\n"
        "mov v15.16b, v31.16b\n"
        ".word 0x4e8c970e  // sdot   v14.4s, v24.16b, v12.16b\n"
        "and x17, x24, #0xffffffe0\n"
        ".word 0x4e8b972f  // sdot   v15.4s, v25.16b, v11.16b\n"
        ".word 0x4e89974e  // sdot   v14.4s, v26.16b, v9.16b\n"
        "add x17, x26, x17\n"
        ".word 0x4e8a976f  // sdot   v15.4s, v27.16b, v10.16b\n"
        ".word 0x4e88978e  // sdot   v14.4s, v28.16b, v8.16b\n"
        "rev32 v21.8h, v8.8h\n"
        "rev32 v6.8h, v11.8h\n"
        "ldp q11, q22, [x17]\n"
        ".word 0x4e8d97af  // sdot   v15.4s, v29.16b, v13.16b\n"
        "sqrdmulh v8.4s, v14.4s, v3.4s\n"
        "rev32 v20.8h, v9.8h\n"
        "sqrdmulh v9.4s, v15.4s, v3.4s\n"
        "sqrshl v8.4s, v8.4s, v4.4s\n"
        "rev32 v5.8h, v13.8h\n"
        "add %[output_block_data], x17, x12\n"
        "add x17, x17, x5\n"
        "sqrshl v9.4s, v9.4s, v4.4s\n"
        "sqxtn v13.4h, v8.4s\n"
        "rev32 v19.8h, v12.8h\n"
        "ldp q17, q7, [%[output_block_data]]\n"
        "ldp q18, q16, [x17]\n"
        "sqxtn2 v13.8h, v9.4s\n"
        "trn1 v12.8h, v19.8h, v11.8h\n"
        "sqadd v19.8h, v13.8h, v0.8h\n"
        "sqxtun v19.8b, v19.8h\n"
        "rev32 v23.8h, v10.8h\n"
        "umax v19.8b, v19.8b, v1.8b\n"
        "add x15, %[bias_data], w14, sxtw\n"
        "cmp w16, #1\n"  // =1
        "mov v14.16b, v11.16b\n"
        "trn1 v11.8h, v6.8h, v22.8h\n"
        "trn1 v9.8h, v20.8h, v17.8h\n"
        "trn1 v8.8h, v21.8h, v18.8h\n"
        "trn1 v10.8h, v23.8h, v7.8h\n"
        "umin v19.8b, v19.8b, v2.8b\n"
        "trn1 v13.8h, v5.8h, v16.8h\n"
        "str d19, [x15]\n"
        "b.gt " DC_KERNEL_NO_MULT_STRIDE_28 "b\n"
        // %bb.31: // in Loop: Header=BB227_30 Depth=2
        "cbnz x28, " DC_KERNEL_NO_MULT_STRIDE_28 "b\n"
        // %bb.32: // in Loop: Header=BB227_30 Depth=2
        "mov v6.16b, v14.16b\n"
        "b " DC_KERNEL_NO_MULT_STRIDE_29 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_33 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "ldp q13, q11, [sp, #96]\n"  // 32-byte Folded Reload
        "b " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_34 ":\n"  // in Loop: Header=BB227_4 Depth=1
        "ldp q13, q11, [sp, #96]\n"  // 32-byte Folded Reload
        "ldp q15, q14, [sp, #32]\n"  // 32-byte Folded Reload
        "ldr q20, [sp, #16]\n"  // 16-byte Folded Reload
        "b " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
        DC_KERNEL_NO_MULT_STRIDE_35 ":\n"
        // Compiled intrinsics total stack 304, now 160 for spillage only.
        "add sp, sp, #160\n"  // =304
        :
        // Outputs.
        [ scratch_block_data ] "+r"(scratch_block_data),
        [ filter_workspace ] "+r"(filter_workspace),
        [ bias_data ] "+r"(bias_data),
        [ output_block_data ] "+r"(output_block_data)
        :
        // Inputs.
        [ function_params ] "r"(function_params)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
        "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
        "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
        "x27", "x28");
#undef DC_KERNEL_NO_MULT_STRIDE_1
#undef DC_KERNEL_NO_MULT_STRIDE_2
#undef DC_KERNEL_NO_MULT_STRIDE_3
#undef DC_KERNEL_NO_MULT_STRIDE_4
#undef DC_KERNEL_NO_MULT_STRIDE_5
#undef DC_KERNEL_NO_MULT_STRIDE_6
#undef DC_KERNEL_NO_MULT_STRIDE_7
#undef DC_KERNEL_NO_MULT_STRIDE_8
#undef DC_KERNEL_NO_MULT_STRIDE_9
#undef DC_KERNEL_NO_MULT_STRIDE_10
#undef DC_KERNEL_NO_MULT_STRIDE_11
#undef DC_KERNEL_NO_MULT_STRIDE_12
#undef DC_KERNEL_NO_MULT_STRIDE_13
#undef DC_KERNEL_NO_MULT_STRIDE_14
#undef DC_KERNEL_NO_MULT_STRIDE_15
#undef DC_KERNEL_NO_MULT_STRIDE_16
#undef DC_KERNEL_NO_MULT_STRIDE_17
#undef DC_KERNEL_NO_MULT_STRIDE_18
#undef DC_KERNEL_NO_MULT_STRIDE_19
#undef DC_KERNEL_NO_MULT_STRIDE_20
#undef DC_KERNEL_NO_MULT_STRIDE_21
#undef DC_KERNEL_NO_MULT_STRIDE_22
#undef DC_KERNEL_NO_MULT_STRIDE_23
#undef DC_KERNEL_NO_MULT_STRIDE_24
#undef DC_KERNEL_NO_MULT_STRIDE_25
#undef DC_KERNEL_NO_MULT_STRIDE_26
#undef DC_KERNEL_NO_MULT_STRIDE_27
#undef DC_KERNEL_NO_MULT_STRIDE_28
#undef DC_KERNEL_NO_MULT_STRIDE_29
#undef DC_KERNEL_NO_MULT_STRIDE_30
#undef DC_KERNEL_NO_MULT_STRIDE_31
#undef DC_KERNEL_NO_MULT_STRIDE_32
#undef DC_KERNEL_NO_MULT_STRIDE_33
#undef DC_KERNEL_NO_MULT_STRIDE_34
#undef DC_KERNEL_NO_MULT_STRIDE_35
  }  // NOLINT(readability/fn_size) Manually unrolled.

  // Public entry point for this kernel specialization.  Forwards all
  // arguments unchanged to KernelMacroBlockNeon; marked noinline — see
  // the attribute (presumably to avoid duplicating the asm body at
  // call sites; TODO confirm intent).
  static void __attribute__((noinline))
  Run(const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, uint8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
                         output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/1> {
static inline void KernelMacroBlockNeon(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
// Note that argument registers may be reused after parameter loading.
// x0 %[scratch_block_data]
// x1 %[filter_workspace]
// x2 %[bias_data]
// x3 %[output_block_data]
// x4 %[function_params]
#define DC_KERNEL_MULT_1 "1"
#define DC_KERNEL_MULT_2 "2"
#define DC_KERNEL_MULT_3 "3"
#define DC_KERNEL_MULT_4 "4"
#define DC_KERNEL_MULT_5 "5"
#define DC_KERNEL_MULT_6 "6"
#define DC_KERNEL_MULT_7 "7"
#define DC_KERNEL_MULT_8 "8"
#define DC_KERNEL_MULT_9 "9"
#define DC_KERNEL_MULT_10 "10"
#define DC_KERNEL_MULT_11 "11"
#define DC_KERNEL_MULT_12 "12"
#define DC_KERNEL_MULT_13 "13"
#define DC_KERNEL_MULT_14 "14"
#define DC_KERNEL_MULT_15 "15"
#define DC_KERNEL_MULT_16 "16"
#define DC_KERNEL_MULT_17 "17"
#define DC_KERNEL_MULT_18 "18"
#define DC_KERNEL_MULT_19 "19"
#define DC_KERNEL_MULT_20 "20"
#define DC_KERNEL_MULT_21 "21"
#define DC_KERNEL_MULT_22 "22"
asm volatile(
// Compiled code used block of 288 for spill out of total stack of 400.
// However, an 8-byte spill was sneaked in to #296.
// Spillage increased to 304 and these are mapped to #288.
"sub sp, sp, #304\n" // =400
"ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
"str %[filter_workspace], [sp, #32]\n" // 8-byte Folded Spill
"cmp w8, #1\n" // =1
"str w8, [sp, #12]\n" // 4-byte Folded Spill
"b.lt " DC_KERNEL_MULT_22 "f\n"
// %bb.1:
"str wzr, [sp, #28]\n" // 4-byte Folded Spill
"ldpsw x21, x5, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
"ldrb w8, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "]\n"
"ldrsw x17, [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
"ldr w13, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
"add x11, %[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT) "\n" // =36
"ldp w1, w15, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"add x10, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n" // =28
"add x12, %[function_params], #" STR(DP_OFFSET_OUTPUT_MULTIPLIER) "\n" // =32
"ld1r { v2.4s }, [x11]\n"
"dup v3.16b, w8\n"
"fmov s5, w8\n"
"lsl x11, x21, #1\n"
"add x7, x21, x21, lsl #1\n"
"lsl x8, x17, #1\n"
"ldr w16, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
"ld1r { v0.8h }, [x10]\n"
"ld1r { v1.4s }, [x12]\n"
"str w13, [sp, #272]\n" // 4-byte Folded Spill
"cmp w13, #4\n" // =4
"add x10, x8, x17\n"
"add x6, x8, x7\n"
"add x12, x8, x11\n"
"add x13, x8, x21\n"
"add x8, %[output_block_data], x8\n"
"str x8, [sp, #176]\n" // 8-byte Folded Spill
"add x8, x7, x17\n"
"add x14, x11, x17\n"
"add x24, %[output_block_data], x8\n"
"add x8, %[output_block_data], x14\n"
"add x14, x5, #4\n" // =4
"ccmp w15, w1, #0, lt\n"
"str x14, [sp, #136]\n" // 8-byte Folded Spill
"lsl x14, x17, #2\n"
"ldrb w9, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "]\n"
"csel w25, w15, w1, lt\n"
"cmp w16, #1\n" // =1
"str x14, [sp, #128]\n" // 8-byte Folded Spill
"add x14, %[output_block_data], x21\n"
"add x22, x5, x5, lsl #2\n"
"str x16, [sp, #56]\n" // 8-byte Folded Spill
"cset w16, lt\n"
"cmp w1, #1\n" // =1
"str x14, [sp, #120]\n" // 8-byte Folded Spill
"add x14, %[output_block_data], x17\n"
"lsl x20, x5, #2\n"
"str w1, [sp, #276]\n" // 4-byte Folded Spill
"cset w1, lt\n"
"str x14, [sp, #112]\n" // 8-byte Folded Spill
"add x14, x22, #4\n" // =4
"add x19, x5, x5, lsl #1\n"
"orr w16, w16, w1\n"
"str x14, [sp, #104]\n" // 8-byte Folded Spill
"add x14, x20, #4\n" // =4
"dup v4.16b, w9\n"
"fmov s6, w9\n"
"lsl %[function_params], x5, #1\n"
"add x9, x21, x17\n"
"str w16, [sp, #8]\n" // 4-byte Folded Spill
"add x16, x10, x21\n"
"str x14, [sp, #96]\n" // 8-byte Folded Spill
"add x14, x19, #4\n" // =4
"mov x23, xzr\n"
"add x9, %[output_block_data], x9\n"
"str w15, [sp, #268]\n" // 4-byte Folded Spill
"add x15, x10, x11\n"
"add x27, %[output_block_data], x12\n"
"add x12, %[output_block_data], x16\n"
"str x14, [sp, #88]\n" // 8-byte Folded Spill
"add x14, %[function_params], #4\n" // =4
"stp x11, x21, [sp, #184]\n" // 16-byte Folded Spill
"add x11, %[output_block_data], x11\n"
"str x9, [sp, #168]\n" // 8-byte Folded Spill
"add x9, x10, x7\n"
"add x26, %[output_block_data], x6\n"
"add x28, %[output_block_data], x13\n"
"mov x13, x23\n"
"str x12, [sp, #144]\n" // 8-byte Folded Spill
"mov x12, x7\n"
"stp x7, %[output_block_data], [sp, #40]\n" // 16-byte Folded Spill
"stp x19, x5, [sp, #248]\n" // 16-byte Folded Spill
"stp x22, x20, [sp, #232]\n" // 16-byte Folded Spill
"stp x11, x14, [sp, #72]\n" // 16-byte Folded Spill
"add x11, %[output_block_data], x7\n"
"ldp x7, x6, [sp, #120]\n" // 16-byte Folded Reload
"ldr x23, [sp, #112]\n" // 8-byte Folded Reload
"ldp x22, x19, [sp, #88]\n" // 16-byte Folded Reload
"add x10, %[output_block_data], x10\n"
"dup v5.8b, v5.b[0]\n"
"dup v6.8b, v6.b[0]\n"
"str x10, [sp, #152]\n" // 8-byte Folded Spill
"add x9, %[output_block_data], x9\n"
"add x10, %[output_block_data], x15\n"
"mov w15, #4\n"
"mov x20, x14\n"
"str %[function_params], [sp, #280]\n" // 8-byte Folded Spill
"str x11, [sp, #64]\n" // 8-byte Folded Spill
"str %[scratch_block_data], [sp, #200]\n" // 8-byte Folded Spill
"str w25, [sp, #164]\n" // 4-byte Folded Spill
"str x9, [sp, #288]\n" // 8-byte Folded Spill
"b " DC_KERNEL_MULT_4 "f\n"
DC_KERNEL_MULT_2 ":\n" // in Loop: Header=BB205_4 Depth=1
"mov %[bias_data], x11\n"
DC_KERNEL_MULT_3 ":\n" // in Loop: Header=BB205_4 Depth=1
"ldr w13, [sp, #28]\n" // 4-byte Folded Reload
"ldr w12, [sp, #12]\n" // 4-byte Folded Reload
"ldr x11, [sp, #48]\n" // 8-byte Folded Reload
"add w13, w13, #1\n" // =1
"str w13, [sp, #28]\n" // 4-byte Folded Spill
"cmp w13, w12\n"
"ldr x13, [sp, #16]\n" // 8-byte Folded Reload
"add x11, x11, #8\n" // =8
"str x11, [sp, #48]\n" // 8-byte Folded Spill
"add x13, x13, #8\n" // =8
"b.eq " DC_KERNEL_MULT_22 "f\n"
DC_KERNEL_MULT_4 ":\n" // =>This Loop Header: Depth=1
// Child Loop BB205_18 Depth 2
// Child Loop BB205_20 Depth 3
// Child Loop BB205_21 Depth 4
// Child Loop BB205_7 Depth 2
// Child Loop BB205_9 Depth 3
// Child Loop BB205_13 Depth 3
"ldr x12, [sp, #32]\n" // 8-byte Folded Reload
"ldr x14, [sp, #56]\n" // 8-byte Folded Reload
"ldp q20, q7, [x12]\n"
"ldp q19, q16, [x12, #32]\n"
"ldp q18, q17, [x12, #64]\n"
"cmp w14, #4\n" // =4
"add x12, x12, #96\n" // =96
"str x12, [sp, #32]\n" // 8-byte Folded Spill
"str x13, [sp, #16]\n" // 8-byte Folded Spill
"b.ne " DC_KERNEL_MULT_15 "f\n"
// %bb.5: // in Loop: Header=BB205_4 Depth=1
"mov %[filter_workspace], xzr\n"
"mov x5, x13\n"
"b " DC_KERNEL_MULT_7 "f\n"
DC_KERNEL_MULT_6 ":\n" // in Loop: Header=BB205_7 Depth=2
"add %[filter_workspace], x1, #1\n" // =1
"cmp %[filter_workspace], #2\n" // =2
"add x5, x5, #4\n" // =4
"mov v18.16b, v17.16b\n"
"mov v19.16b, v16.16b\n"
"mov v20.16b, v7.16b\n"
"b.eq " DC_KERNEL_MULT_3 "b\n"
DC_KERNEL_MULT_7 ":\n" // Parent Loop BB205_4 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB205_9 Depth 3
// Child Loop BB205_13 Depth 3
"ldr q21, [%[bias_data]], #16\n"
"ldr w12, [%[scratch_block_data]]\n"
"ldp %[function_params], x13, [sp, #248]\n" // 16-byte Folded Reload
"ldr x16, [sp, #240]\n" // 8-byte Folded Reload
"ldr x14, [sp, #280]\n" // 8-byte Folded Reload
"fmov s22, w12\n"
"add x13, %[scratch_block_data], x13\n"
"ldr w16, [%[scratch_block_data], x16]\n"
"mov v22.s[1], w12\n"
"ld1 { v22.s }[2], [x13]\n"
"ldr x13, [sp, #232]\n" // 8-byte Folded Reload
"ldr w14, [%[scratch_block_data], x14]\n"
"fmov s23, w16\n"
"ldr w4, [%[scratch_block_data], %[function_params]]\n"
"add x13, %[scratch_block_data], x13\n"
"mov v23.s[1], w16\n"
"ld1 { v23.s }[2], [x13]\n"
"fmov s24, w14\n"
"mov v24.s[1], w14\n"
"dup v25.4s, w14\n"
"mov v28.16b, v21.16b\n"
"mov v29.16b, v21.16b\n"
"mov v30.16b, v21.16b\n"
"dup v26.4s, w4\n"
"mov v31.16b, v21.16b\n"
"mov v24.s[2], w4\n"
"cmp w25, #1\n" // =1
".word 0x4e99965c // sdot v28.4s, v18.16b, v25.16b\n"
".word 0x4e99967d // sdot v29.4s, v19.16b, v25.16b\n"
".word 0x4e99969e // sdot v30.4s, v20.16b, v25.16b\n"
"mov v24.s[3], w14\n"
"mov v22.s[3], w12\n"
"mov v23.s[3], w16\n"
".word 0x4e9a969f // sdot v31.4s, v20.16b, v26.16b\n"
"b.lt " DC_KERNEL_MULT_14 "f\n"
// %bb.8: // in Loop: Header=BB205_7 Depth=2
"stp %[filter_workspace], %[bias_data], [sp, #216]\n" // 16-byte Folded Spill
"mov w13, w25\n"
"str x5, [sp, #208]\n" // 8-byte Folded Spill
"mov x16, x5\n"
"mov x14, %[scratch_block_data]\n"
"ldp x25, %[scratch_block_data], [sp, #168]\n" // 16-byte Folded Reload
"mov x15, x10\n"
"mov x9, x8\n"
"mov x8, x24\n"
"mov x24, x28\n"
"mov x28, x27\n"
"ldp %[filter_workspace], x27, [sp, #144]\n" // 16-byte Folded Reload
"ldr x5, [sp, #136]\n" // 8-byte Folded Reload
"ldr %[bias_data], [sp, #104]\n" // 8-byte Folded Reload
"ldp x10, x11, [sp, #64]\n" // 16-byte Folded Reload
"shl v25.4s, v20.4s, #8\n"
"shl v26.4s, v19.4s, #8\n"
"shl v27.4s, v18.4s, #8\n"
DC_KERNEL_MULT_9 ":\n" // Parent Loop BB205_4 Depth=1
// Parent Loop BB205_7 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4f96e29c // sdot v28.4s, v20.16b, v22.4b[0]\n"
".word 0x4f96ea9d // sdot v29.4s, v20.16b, v22.4b[2]\n"
".word 0x4f98ea7e // sdot v30.4s, v19.16b, v24.4b[2]\n"
".word 0x4f96ea7c // sdot v28.4s, v19.16b, v22.4b[2]\n"
".word 0x4f97e27f // sdot v31.4s, v19.16b, v23.4b[0]\n"
".word 0x4f98ea5d // sdot v29.4s, v18.16b, v24.4b[2]\n"
".word 0x4f97e25e // sdot v30.4s, v18.16b, v23.4b[0]\n"
"sqrdmulh v28.4s, v28.4s, v1.4s\n"
".word 0x4f97ea5f // sdot v31.4s, v18.16b, v23.4b[2]\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
"sqrshl v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqrshl v30.4s, v30.4s, v2.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqrshl v31.4s, v31.4s, v2.4s\n"
"sqxtn v30.4h, v30.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqxtun v28.8b, v28.8h\n"
"sqxtun2 v28.16b, v29.8h\n"
"umax v28.16b, v28.16b, v3.16b\n"
"add %[function_params], x7, x16\n"
"umin v28.16b, v28.16b, v4.16b\n"
"add x21, x11, x16\n"
"str s28, [%[output_block_data], x16]\n"
"st1 { v28.s }[1], [%[function_params]]\n"
"add %[function_params], x10, x16\n"
"st1 { v28.s }[2], [x21]\n"
"st1 { v28.s }[3], [%[function_params]]\n"
"mov x12, x14\n"
"add x21, x14, x20\n"
"ldr w4, [x14, #4]!\n"
"ld1 { v24.s }[1], [x21]\n"
"add x21, x12, x19\n"
"ld1 { v23.s }[1], [x21]\n"
"mov v22.s[1], w4\n"
"add %[function_params], x12, x22\n"
"ld1 { v24.s }[3], [%[function_params]]\n"
"add %[function_params], x12, x5\n"
"ld1 { v22.s }[3], [%[function_params]]\n"
"add x12, x12, %[bias_data]\n"
"mov v28.16b, v21.16b\n"
"ld1 { v23.s }[3], [x12]\n"
"mov v29.16b, v21.16b\n"
"mov v30.16b, v21.16b\n"
".word 0x4f96e33c // sdot v28.4s, v25.16b, v22.4b[0]\n"
"mov v31.16b, v21.16b\n"
".word 0x4f98e33e // sdot v30.4s, v25.16b, v24.4b[0]\n"
".word 0x4f96eb3d // sdot v29.4s, v25.16b, v22.4b[2]\n"
".word 0x4f96eb5c // sdot v28.4s, v26.16b, v22.4b[2]\n"
".word 0x4f98eb3f // sdot v31.4s, v25.16b, v24.4b[2]\n"
".word 0x4f98eb5e // sdot v30.4s, v26.16b, v24.4b[2]\n"
".word 0x4f98e35d // sdot v29.4s, v26.16b, v24.4b[0]\n"
".word 0x4f98e37c // sdot v28.4s, v27.16b, v24.4b[0]\n"
".word 0x4f97e35f // sdot v31.4s, v26.16b, v23.4b[0]\n"
".word 0x4f97e37e // sdot v30.4s, v27.16b, v23.4b[0]\n"
".word 0x4f98eb7d // sdot v29.4s, v27.16b, v24.4b[2]\n"
"sqrdmulh v28.4s, v28.4s, v1.4s\n"
".word 0x4f97eb7f // sdot v31.4s, v27.16b, v23.4b[2]\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrshl v28.4s, v28.4s, v2.4s\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
"sqrshl v30.4s, v30.4s, v2.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqrshl v31.4s, v31.4s, v2.4s\n"
"sqxtn v30.4h, v30.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqxtun v28.8b, v28.8h\n"
"sqxtun2 v28.16b, v29.8h\n"
"umax v28.16b, v28.16b, v3.16b\n"
"add x12, x25, x16\n"
"umin v28.16b, v28.16b, v4.16b\n"
"add %[function_params], x9, x16\n"
"str s28, [x23, x16]\n"
"st1 { v28.s }[1], [x12]\n"
"add x12, x8, x16\n"
"mov v29.16b, v21.16b\n"
"ushr v10.2d, v22.2d, #16\n"
"mov v30.16b, v21.16b\n"
"mov v31.16b, v21.16b\n"
"st1 { v28.s }[2], [%[function_params]]\n"
"st1 { v28.s }[3], [x12]\n"
"ushr v28.2d, v24.2d, #16\n"
".word 0x4f8ae29d // sdot v29.4s, v20.16b, v10.4b[0]\n"
"mov v8.16b, v21.16b\n"
".word 0x4f9ce29f // sdot v31.4s, v20.16b, v28.4b[0]\n"
".word 0x4f8aea9e // sdot v30.4s, v20.16b, v10.4b[2]\n"
".word 0x4f8aea7d // sdot v29.4s, v19.16b, v10.4b[2]\n"
"ushr v9.2d, v23.2d, #16\n"
".word 0x4f9cea88 // sdot v8.4s, v20.16b, v28.4b[2]\n"
".word 0x4f9cea7f // sdot v31.4s, v19.16b, v28.4b[2]\n"
".word 0x4f9ce27e // sdot v30.4s, v19.16b, v28.4b[0]\n"
".word 0x4f9ce25d // sdot v29.4s, v18.16b, v28.4b[0]\n"
".word 0x4f89e268 // sdot v8.4s, v19.16b, v9.4b[0]\n"
".word 0x4f89e25f // sdot v31.4s, v18.16b, v9.4b[0]\n"
".word 0x4f9cea5e // sdot v30.4s, v18.16b, v28.4b[2]\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
".word 0x4f89ea48 // sdot v8.4s, v18.16b, v9.4b[2]\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqrdmulh v8.4s, v8.4s, v1.4s\n"
"sqrshl v31.4s, v31.4s, v2.4s\n"
"sqrshl v30.4s, v30.4s, v2.4s\n"
"sqxtn v29.4h, v29.4s\n"
"sqrshl v8.4s, v8.4s, v2.4s\n"
"sqxtn v31.4h, v31.4s\n"
"sqxtn2 v29.8h, v30.4s\n"
"sqxtn2 v31.8h, v8.4s\n"
"sqadd v29.8h, v29.8h, v0.8h\n"
"sqadd v30.8h, v31.8h, v0.8h\n"
"sqxtun v29.8b, v29.8h\n"
"sqxtun2 v29.16b, v30.8h\n"
"umax v29.16b, v29.16b, v3.16b\n"
"add %[function_params], x24, x16\n"
"umin v29.16b, v29.16b, v4.16b\n"
"mov v30.16b, v21.16b\n"
"add x12, x28, x16\n"
"str s29, [%[scratch_block_data], x16]\n"
"st1 { v29.s }[1], [%[function_params]]\n"
"add %[function_params], x26, x16\n"
"mov v31.16b, v21.16b\n"
"mov v8.16b, v21.16b\n"
".word 0x4f8ae33e // sdot v30.4s, v25.16b, v10.4b[0]\n"
"st1 { v29.s }[2], [x12]\n"
"st1 { v29.s }[3], [%[function_params]]\n"
"mov v29.16b, v21.16b\n"
".word 0x4f9ce328 // sdot v8.4s, v25.16b, v28.4b[0]\n"
".word 0x4f8aeb3f // sdot v31.4s, v25.16b, v10.4b[2]\n"
".word 0x4f8aeb5e // sdot v30.4s, v26.16b, v10.4b[2]\n"
".word 0x4f9ceb3d // sdot v29.4s, v25.16b, v28.4b[2]\n"
".word 0x4f9ceb48 // sdot v8.4s, v26.16b, v28.4b[2]\n"
".word 0x4f9ce35f // sdot v31.4s, v26.16b, v28.4b[0]\n"
".word 0x4f9ce37e // sdot v30.4s, v27.16b, v28.4b[0]\n"
".word 0x4f89e35d // sdot v29.4s, v26.16b, v9.4b[0]\n"
".word 0x4f89e368 // sdot v8.4s, v27.16b, v9.4b[0]\n"
".word 0x4f9ceb7f // sdot v31.4s, v27.16b, v28.4b[2]\n"
"sqrdmulh v30.4s, v30.4s, v1.4s\n"
".word 0x4f89eb7d // sdot v29.4s, v27.16b, v9.4b[2]\n"
"sqrdmulh v28.4s, v8.4s, v1.4s\n"
"sqrdmulh v31.4s, v31.4s, v1.4s\n"
"sqrshl v30.4s, v30.4s, v2.4s\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrshl v28.4s, v28.4s, v2.4s\n"
"sqrshl v31.4s, v31.4s, v2.4s\n"
"sqxtn v30.4h, v30.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqxtun v29.8b, v29.8h\n"
"sqxtun2 v29.16b, v28.8h\n"
"umax v28.16b, v29.16b, v3.16b\n"
"add x12, %[filter_workspace], x16\n"
"umin v8.16b, v28.16b, v4.16b\n"
"str s8, [x27, x16]\n"
"st1 { v8.s }[1], [x12]\n"
"ldr x12, [sp, #288]\n" // 8-byte Folded Reload
"mov v28.16b, v21.16b\n"
"mov v29.16b, v21.16b\n"
"mov v30.16b, v21.16b\n"
"mov v31.16b, v21.16b\n"
"ushr v24.2d, v24.2d, #32\n"
"add %[function_params], x15, x16\n"
"add x12, x12, x16\n"
"subs w13, w13, #1\n" // =1
"ushr v22.2d, v22.2d, #32\n"
"ushr v23.2d, v23.2d, #32\n"
".word 0x4f98e25c // sdot v28.4s, v18.16b, v24.4b[0]\n"
".word 0x4f98e27d // sdot v29.4s, v19.16b, v24.4b[0]\n"
".word 0x4f98e29e // sdot v30.4s, v20.16b, v24.4b[0]\n"
".word 0x4f98ea9f // sdot v31.4s, v20.16b, v24.4b[2]\n"
"add x16, x16, x6\n"
"st1 { v8.s }[2], [%[function_params]]\n"
"st1 { v8.s }[3], [x12]\n"
"b.ne " DC_KERNEL_MULT_9 "b\n"
// %bb.10: // in Loop: Header=BB205_7 Depth=2
"ldr w25, [sp, #164]\n" // 4-byte Folded Reload
"ldp x21, %[scratch_block_data], [sp, #192]\n" // 16-byte Folded Reload
"ldr %[function_params], [sp, #184]\n" // 8-byte Folded Reload
"ldp %[filter_workspace], %[bias_data], [sp, #216]\n" // 16-byte Folded Reload
"ldr x5, [sp, #208]\n" // 8-byte Folded Reload
"add x13, %[output_block_data], x16\n"
"mov w12, w25\n"
"mov x27, x28\n"
"mov x28, x24\n"
"mov x24, x8\n"
"mov x8, x9\n"
"mov x10, x15\n"
"mov w15, #4\n"
"ldr w16, [sp, #276]\n" // 4-byte Folded Reload
"cmp w12, w16\n"
"b.ge " DC_KERNEL_MULT_6 "b\n"
DC_KERNEL_MULT_11 ":\n" // in Loop: Header=BB205_7 Depth=2
"ldr w12, [sp, #272]\n" // 4-byte Folded Reload
"cmp w12, #1\n" // =1
"b.lt " DC_KERNEL_MULT_6 "b\n"
// %bb.12: // in Loop: Header=BB205_7 Depth=2
"add x12, x14, #4\n" // =4
"ldr x14, [sp, #240]\n" // 8-byte Folded Reload
"ldr x16, [sp, #280]\n" // 8-byte Folded Reload
"add x14, x12, x14\n"
"ld1 { v23.s }[1], [x14]\n"
"ldr x14, [sp, #232]\n" // 8-byte Folded Reload
"add x16, x12, x16\n"
"ld1 { v24.s }[1], [x16]\n"
"add x14, x12, x14\n"
"ld1 { v23.s }[3], [x14]\n"
"ldp x16, x14, [sp, #248]\n" // 16-byte Folded Reload
"add x16, x12, x16\n"
"ld1 { v24.s }[3], [x16]\n"
"ldr x16, [sp, #40]\n" // 8-byte Folded Reload
"ld1 { v22.s }[1], [x12], x14\n"
"ld1 { v22.s }[3], [x12]\n"
"ldr w12, [sp, #272]\n" // 4-byte Folded Reload
DC_KERNEL_MULT_13 ":\n" // Parent Loop BB205_4 Depth=1
// Parent Loop BB205_7 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4f96e29c // sdot v28.4s, v20.16b, v22.4b[0]\n"
".word 0x4f96ea9d // sdot v29.4s, v20.16b, v22.4b[2]\n"
".word 0x4f98ea7e // sdot v30.4s, v19.16b, v24.4b[2]\n"
".word 0x4f96ea7c // sdot v28.4s, v19.16b, v22.4b[2]\n"
".word 0x4f97e27f // sdot v31.4s, v19.16b, v23.4b[0]\n"
".word 0x4f98ea5d // sdot v29.4s, v18.16b, v24.4b[2]\n"
".word 0x4f97e25e // sdot v30.4s, v18.16b, v23.4b[0]\n"
"sqrdmulh v25.4s, v28.4s, v1.4s\n"
".word 0x4f97ea5f // sdot v31.4s, v18.16b, v23.4b[2]\n"
"sqrdmulh v26.4s, v29.4s, v1.4s\n"
"sqrdmulh v27.4s, v30.4s, v1.4s\n"
"sqrshl v25.4s, v25.4s, v2.4s\n"
"sqrdmulh v28.4s, v31.4s, v1.4s\n"
"sqrshl v26.4s, v26.4s, v2.4s\n"
"sqrshl v27.4s, v27.4s, v2.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqrshl v28.4s, v28.4s, v2.4s\n"
"sqxtn v27.4h, v27.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqxtn2 v27.8h, v28.4s\n"
"sqadd v25.8h, v25.8h, v0.8h\n"
"sqadd v26.8h, v27.8h, v0.8h\n"
"sqxtun v25.8b, v25.8h\n"
"sqxtun2 v25.16b, v26.8h\n"
"umax v25.16b, v25.16b, v3.16b\n"
"add x14, x13, x21\n"
"umin v25.16b, v25.16b, v4.16b\n"
"str s25, [x13]\n"
"st1 { v25.s }[1], [x14]\n"
"add x14, x13, %[function_params]\n"
"ushr v24.2d, v24.2d, #8\n"
"mov v28.16b, v21.16b\n"
"mov v29.16b, v21.16b\n"
"mov v30.16b, v21.16b\n"
"mov v31.16b, v21.16b\n"
"st1 { v25.s }[2], [x14]\n"
"add x14, x13, x16\n"
"subs w12, w12, #1\n" // =1
"ushr v22.2d, v22.2d, #8\n"
"ushr v23.2d, v23.2d, #8\n"
".word 0x4f98e25c // sdot v28.4s, v18.16b, v24.4b[0]\n"
".word 0x4f98e27d // sdot v29.4s, v19.16b, v24.4b[0]\n"
".word 0x4f98e29e // sdot v30.4s, v20.16b, v24.4b[0]\n"
"add x13, x13, x17\n"
".word 0x4f98ea9f // sdot v31.4s, v20.16b, v24.4b[2]\n"
"st1 { v25.s }[3], [x14]\n"
"b.ne " DC_KERNEL_MULT_13 "b\n"
"b " DC_KERNEL_MULT_6 "b\n"
DC_KERNEL_MULT_14 ":\n" // in Loop: Header=BB205_7 Depth=2
"ldr x11, [sp, #48]\n" // 8-byte Folded Reload
"ldr %[function_params], [sp, #184]\n" // 8-byte Folded Reload
"mov w12, wzr\n"
"mov x14, %[scratch_block_data]\n"
"add x13, x11, %[filter_workspace], lsl #2\n"
"ldr w16, [sp, #276]\n" // 4-byte Folded Reload
"cmp w12, w16\n"
"b.ge " DC_KERNEL_MULT_6 "b\n"
"b " DC_KERNEL_MULT_11 "b\n"
DC_KERNEL_MULT_15 ":\n" // in Loop: Header=BB205_4 Depth=1
"ldr w14, [sp, #8]\n" // 4-byte Folded Reload
"add x11, %[bias_data], #32\n" // =32
"tbnz w14, #0, " DC_KERNEL_MULT_2 "b\n"
// %bb.16: // in Loop: Header=BB205_4 Depth=1
"ldp q21, q22, [%[bias_data]]\n"
"ldr %[filter_workspace], [sp, #48]\n" // 8-byte Folded Reload
"mov x14, xzr\n"
"b " DC_KERNEL_MULT_18 "f\n"
DC_KERNEL_MULT_17 ":\n" // in Loop: Header=BB205_18 Depth=2
"ldr x12, [sp, #56]\n" // 8-byte Folded Reload
"ldp x21, %[scratch_block_data], [sp, #192]\n" // 16-byte Folded Reload
"add x14, x14, #1\n" // =1
"cmp x14, x12\n"
"add %[filter_workspace], x1, x21\n"
"b.eq " DC_KERNEL_MULT_2 "b\n"
DC_KERNEL_MULT_18 ":\n" // Parent Loop BB205_4 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB205_20 Depth 3
// Child Loop BB205_21 Depth 4
"ldr x16, [sp, #256]\n" // 8-byte Folded Reload
"mov w13, wzr\n"
"madd x12, x14, x16, %[scratch_block_data]\n"
"mov %[scratch_block_data], x16\n"
"ldr w16, [x12]\n"
"add %[function_params], x12, %[scratch_block_data]\n"
"fmov s23, w16\n"
"mov v23.s[1], w16\n"
"ld1 { v23.s }[2], [%[function_params]]\n"
"ldr %[function_params], [sp, #280]\n" // 8-byte Folded Reload
"mov v23.s[3], w16\n"
"add %[function_params], x12, %[function_params]\n"
"ld1r { v24.4s }, [%[function_params]]\n"
"mov x16, %[filter_workspace]\n"
"b " DC_KERNEL_MULT_20 "f\n"
DC_KERNEL_MULT_19 ":\n" // in Loop: Header=BB205_20 Depth=3
"ldr w4, [sp, #276]\n" // 4-byte Folded Reload
"add w13, w13, #1\n" // =1
"cmp w13, w4\n"
"b.eq " DC_KERNEL_MULT_17 "b\n"
DC_KERNEL_MULT_20 ":\n" // Parent Loop BB205_4 Depth=1
// Parent Loop BB205_18 Depth=2
// => This Loop Header: Depth=3
// Child Loop BB205_21 Depth 4
"ldr x21, [sp, #280]\n" // 8-byte Folded Reload
"add x12, x12, #4\n" // =4
"mov %[function_params], x12\n"
"ld1 { v23.s }[1], [%[function_params]], x21\n"
"ldr w21, [sp, #268]\n" // 4-byte Folded Reload
"ld1 { v24.s }[1], [%[function_params]]\n"
"ldr w4, [sp, #272]\n" // 4-byte Folded Reload
"cmp w13, w21\n"
"add x21, x12, %[scratch_block_data]\n"
"ld1 { v23.s }[3], [x21]\n"
"csel w4, w4, w15, eq\n"
"cmp w4, #1\n" // =1
"b.lt " DC_KERNEL_MULT_19 "b\n"
DC_KERNEL_MULT_21 ":\n" // Parent Loop BB205_4 Depth=1
// Parent Loop BB205_18 Depth=2
// Parent Loop BB205_20 Depth=3
// => This Inner Loop Header: Depth=4
"mov v25.16b, v21.16b\n"
"mov v26.16b, v22.16b\n"
".word 0x4f97e299 // sdot v25.4s, v20.16b, v23.4b[0]\n"
".word 0x4f97e0fa // sdot v26.4s, v7.16b, v23.4b[0]\n"
".word 0x4f97ea79 // sdot v25.4s, v19.16b, v23.4b[2]\n"
".word 0x4f97ea1a // sdot v26.4s, v16.16b, v23.4b[2]\n"
".word 0x4f98e259 // sdot v25.4s, v18.16b, v24.4b[0]\n"
".word 0x4f98e23a // sdot v26.4s, v17.16b, v24.4b[0]\n"
"sqrdmulh v25.4s, v25.4s, v1.4s\n"
"sqrdmulh v26.4s, v26.4s, v1.4s\n"
"sqrshl v25.4s, v25.4s, v2.4s\n"
"sqrshl v26.4s, v26.4s, v2.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v25.8h, v25.8h, v0.8h\n"
"sqxtun v25.8b, v25.8h\n"
"umax v25.8b, v25.8b, v5.8b\n"
"umin v25.8b, v25.8b, v6.8b\n"
"subs w4, w4, #1\n" // =1
"ushr v23.2d, v23.2d, #8\n"
"ushr v24.2d, v24.2d, #8\n"
"str d25, [x16]\n"
"add x16, x16, x17\n"
"b.ne " DC_KERNEL_MULT_21 "b\n"
"b " DC_KERNEL_MULT_19 "b\n"
DC_KERNEL_MULT_22 ":\n"
// Compiled intrinsics total stack 400, now 304 for spillage only.
"add sp, sp, #304\n" // =400
:
// Outputs.
[ scratch_block_data ] "+r"(scratch_block_data),
[ filter_workspace ] "+r"(filter_workspace),
[ bias_data ] "+r"(bias_data),
[ output_block_data ] "+r"(output_block_data)
:
// Inputs.
[ function_params ] "r"(function_params)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
"v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
"v31",
// We use these general-purpose registers.
"x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"x27", "x28");
#undef DC_KERNEL_MULT_1
#undef DC_KERNEL_MULT_2
#undef DC_KERNEL_MULT_3
#undef DC_KERNEL_MULT_4
#undef DC_KERNEL_MULT_5
#undef DC_KERNEL_MULT_6
#undef DC_KERNEL_MULT_7
#undef DC_KERNEL_MULT_8
#undef DC_KERNEL_MULT_9
#undef DC_KERNEL_MULT_10
#undef DC_KERNEL_MULT_11
#undef DC_KERNEL_MULT_12
#undef DC_KERNEL_MULT_13
#undef DC_KERNEL_MULT_14
#undef DC_KERNEL_MULT_15
#undef DC_KERNEL_MULT_16
#undef DC_KERNEL_MULT_17
#undef DC_KERNEL_MULT_18
#undef DC_KERNEL_MULT_19
#undef DC_KERNEL_MULT_20
#undef DC_KERNEL_MULT_21
#undef DC_KERNEL_MULT_22
} // NOLINT(readability/fn_size) Manually unrolled.
// Thin dispatch wrapper over the assembly implementation above.
// Marked noinline so the large asm body is not duplicated at call
// sites.  NOTE(review): rationale inferred from the attribute --
// confirm against the surrounding kernel-selection machinery.
static void __attribute__((noinline))
Run(const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
output_block_data, function_params);
}
};
// Depthwise 3x3 dot-product convolution macro-block kernel, specialized
// for: NEON dot-product implementation, non-per-channel uint8
// quantization, unit input depth (depth-multiplication path), stride 2.
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/2> {
// Processes one macro block of pre-shuffled input, writing quantized
// uint8 output.
//
// scratch_block_data:  int8 input workspace, read with strides derived
//     from the DP_OFFSET_* geometry fields below.
// filter_workspace:    int8 filter data; 96 bytes are consumed per
//     depth micro-repeat (see the ldp q5..q18 loads at label 1).
// bias_data:           int32 accumulator initializers; 32 bytes consumed
//     per depth micro-repeat (ldp q19, q20 with post-increment).
// output_block_data:   uint8 output destination.
// function_params:     geometry and quantization parameters, accessed
//     via the DP_OFFSET_* field offsets.
//
// The body is compiler-scheduled AArch64 assembly captured as inline
// asm.  Numeric local labels are bound through the
// DC_KERNEL_MULT_STRIDE_* macros so branch targets stay readable; the
// BB206_* comments are the basic-block names from the original
// compilation.
static inline void KernelMacroBlockNeon(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
// Note that argument registers may be reused after parameter loading.
// x0 %[scratch_block_data]
// x1 %[filter_workspace]
// x2 %[bias_data]
// x3 %[output_block_data]
// x4 %[function_params]
#define DC_KERNEL_MULT_STRIDE_1 "1"
#define DC_KERNEL_MULT_STRIDE_2 "2"
#define DC_KERNEL_MULT_STRIDE_3 "3"
#define DC_KERNEL_MULT_STRIDE_4 "4"
#define DC_KERNEL_MULT_STRIDE_5 "5"
#define DC_KERNEL_MULT_STRIDE_6 "6"
#define DC_KERNEL_MULT_STRIDE_7 "7"
#define DC_KERNEL_MULT_STRIDE_8 "8"
#define DC_KERNEL_MULT_STRIDE_9 "9"
#define DC_KERNEL_MULT_STRIDE_10 "10"
#define DC_KERNEL_MULT_STRIDE_11 "11"
#define DC_KERNEL_MULT_STRIDE_12 "12"
#define DC_KERNEL_MULT_STRIDE_13 "13"
asm volatile(
// Load geometry fields from function_params and compute addresses of
// the quantization fields.
"ldr w15, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
"ldp w11, w6, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"ldpsw x9, x10, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
"ldrsw x12, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
"ldrsw x13, [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
"ldr w14, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
"add x17, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "\n" // =40
"add x5, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "\n" // =44
"add x7, %[function_params], #" STR(DP_OFFSET_OUTPUT_MULTIPLIER) "\n" // =32
"add x19, %[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT) "\n" // =36
"add %[function_params], %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n" // =28
"sxtw x11, w11\n"
// Broadcast quantization parameters into vector registers:
// v0 = output_offset (8h), v1 = output_multiplier (4s),
// v2 = output_shift (4s), v3 / v4 = quantized activation min / max (8b).
"ld1r { v0.8h }, [%[function_params]]\n"
"ld1r { v1.4s }, [x7]\n"
"ld1r { v2.4s }, [x19]\n"
"ld1r { v3.8b }, [x17]\n"
"ld1r { v4.8b }, [x5]\n"
"cmp w15, #2\n" // =2
"ccmp w6, w11, #0, lt\n"
"lsl x5, x6, #2\n"
"csel w6, w6, w11, lt\n"
"mov x8, xzr\n"
"add x16, %[scratch_block_data], #4\n" // =4
"lsl x17, x10, #1\n"
"add %[function_params], x10, x10, lsl #1\n"
"sxtw x6, w6\n"
"add x7, x9, x13\n"
"b " DC_KERNEL_MULT_STRIDE_13 "f\n"
// Per-depth-micro-repeat body: load 96 bytes of filter (v5..v18) and
// 32 bytes of bias (v19/v20), then branch to the two-row path
// (height == 2, fall through to %bb.2) or the one-row path (label 7).
DC_KERNEL_MULT_STRIDE_1 ":\n" // in Loop: Header=BB206_13 Depth=1
"ldr w20, [%[scratch_block_data]]\n"
"add x21, %[scratch_block_data], x10\n"
"ldp q5, q6, [%[filter_workspace]]\n"
"ldp q7, q16, [%[filter_workspace], #32]\n"
"fmov s21, w20\n"
"mov v21.s[1], w20\n"
"ld1 { v21.s }[2], [x21]\n"
"ldp q17, q18, [%[filter_workspace], #64]\n"
"ldp q19, q20, [%[bias_data]], #32\n"
"ldr s22, [%[scratch_block_data], x17]\n"
"ubfiz x19, x8, #3, #29\n"
"add %[filter_workspace], %[filter_workspace], #96\n" // =96
"add x19, %[output_block_data], x19\n"
"cmp w14, #2\n" // =2
"mov v21.s[3], w20\n"
"mov x20, xzr\n"
"b.ne " DC_KERNEL_MULT_STRIDE_7 "f\n"
// %bb.2: // in Loop: Header=BB206_13 Depth=1
"dup v22.4s, v22.s[0]\n"
"add x21, %[scratch_block_data], %[function_params]\n"
"add x22, %[scratch_block_data], x10, lsl #2\n"
"ld1 { v22.s }[2], [x21]\n"
"ld1r { v23.4s }, [x22]\n"
"mov x21, xzr\n"
"b " DC_KERNEL_MULT_STRIDE_4 "f\n"
// Two-row output path: each iteration accumulates sdot products of
// the filter registers against input lanes, then requantizes:
// sqrdmulh by v1 (multiplier), sqrshl by v2 (shift), narrowing,
// sqadd of v0 (output offset), sqxtun to uint8, and umax/umin clamp
// to the activation range [v3, v4].
DC_KERNEL_MULT_STRIDE_3 ":\n" // in Loop: Header=BB206_4 Depth=2
"and x22, x20, #0xfffffffc\n"
"add x23, x16, x22\n"
"lsl x24, x10, #2\n"
"mov x22, x23\n"
"ld1 { v21.s }[1], [x22], x24\n"
"add x24, x23, x17\n"
"ld1 { v22.s }[1], [x24]\n"
"add x24, x23, x10\n"
"ld1 { v21.s }[3], [x24]\n"
"add x23, x23, %[function_params]\n"
"ld1 { v22.s }[3], [x23]\n"
"mov v25.16b, v19.16b\n"
"mov v27.16b, v20.16b\n"
"ld1 { v23.s }[1], [x22]\n"
"ushr v29.2d, v21.2d, #16\n"
".word 0x4f9de0b9 // sdot v25.4s, v5.16b, v29.4b[0]\n"
".word 0x4f9de0db // sdot v27.4s, v6.16b, v29.4b[0]\n"
"mov v26.16b, v19.16b\n"
"mov v28.16b, v20.16b\n"
".word 0x4f9de8f9 // sdot v25.4s, v7.16b, v29.4b[2]\n"
".word 0x4f9dea1b // sdot v27.4s, v16.16b, v29.4b[2]\n"
"ushr v29.2d, v22.2d, #16\n"
".word 0x4f9de0ba // sdot v26.4s, v5.16b, v29.4b[0]\n"
".word 0x4f9de0dc // sdot v28.4s, v6.16b, v29.4b[0]\n"
"mov v24.16b, v19.16b\n"
".word 0x4f9de8fa // sdot v26.4s, v7.16b, v29.4b[2]\n"
".word 0x4f9dea1c // sdot v28.4s, v16.16b, v29.4b[2]\n"
".word 0x4f9de239 // sdot v25.4s, v17.16b, v29.4b[0]\n"
".word 0x4f9de25b // sdot v27.4s, v18.16b, v29.4b[0]\n"
"ushr v29.2d, v23.2d, #16\n"
".word 0x4f9de23a // sdot v26.4s, v17.16b, v29.4b[0]\n"
".word 0x4f9de25c // sdot v28.4s, v18.16b, v29.4b[0]\n"
"mov v29.16b, v19.16b\n"
".word 0x4f95e0b8 // sdot v24.4s, v5.16b, v21.4b[0]\n"
".word 0x4f96e0bd // sdot v29.4s, v5.16b, v22.4b[0]\n"
".word 0x4f95e8f8 // sdot v24.4s, v7.16b, v21.4b[2]\n"
".word 0x4f96e8fd // sdot v29.4s, v7.16b, v22.4b[2]\n"
".word 0x4f96e238 // sdot v24.4s, v17.16b, v22.4b[0]\n"
".word 0x4f97e23d // sdot v29.4s, v17.16b, v23.4b[0]\n"
"sqrdmulh v24.4s, v24.4s, v1.4s\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrshl v24.4s, v24.4s, v2.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v29.4s\n"
"sqadd v24.8h, v24.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v3.8b\n"
"add x22, x19, x9\n"
"mov v29.16b, v20.16b\n"
"umin v24.8b, v24.8b, v4.8b\n"
"str s24, [x19]\n"
"st1 { v24.s }[1], [x22]\n"
"mov v24.16b, v20.16b\n"
".word 0x4f95e0dd // sdot v29.4s, v6.16b, v21.4b[0]\n"
".word 0x4f96e0d8 // sdot v24.4s, v6.16b, v22.4b[0]\n"
".word 0x4f95ea1d // sdot v29.4s, v16.16b, v21.4b[2]\n"
".word 0x4f96ea18 // sdot v24.4s, v16.16b, v22.4b[2]\n"
".word 0x4f96e25d // sdot v29.4s, v18.16b, v22.4b[0]\n"
".word 0x4f97e258 // sdot v24.4s, v18.16b, v23.4b[0]\n"
"sqrdmulh v29.4s, v29.4s, v1.4s\n"
"sqrdmulh v24.4s, v24.4s, v1.4s\n"
"sqrshl v29.4s, v29.4s, v2.4s\n"
"sqrshl v24.4s, v24.4s, v2.4s\n"
"sqxtn v29.4h, v29.4s\n"
"sqxtn2 v29.8h, v24.4s\n"
"sqadd v24.8h, v29.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"sqrdmulh v25.4s, v25.4s, v1.4s\n"
"umax v24.8b, v24.8b, v3.8b\n"
"sqrdmulh v26.4s, v26.4s, v1.4s\n"
"sqrshl v25.4s, v25.4s, v2.4s\n"
"add x22, x22, #4\n" // =4
"umin v24.8b, v24.8b, v4.8b\n"
"sqrshl v26.4s, v26.4s, v2.4s\n"
"sqxtn v25.4h, v25.4s\n"
"str s24, [x19, #4]\n"
"st1 { v24.s }[1], [x22]\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqadd v24.8h, v25.8h, v0.8h\n"
"sqrdmulh v27.4s, v27.4s, v1.4s\n"
"sqxtun v24.8b, v24.8h\n"
"sqrdmulh v28.4s, v28.4s, v1.4s\n"
"sqrshl v27.4s, v27.4s, v2.4s\n"
"umax v24.8b, v24.8b, v3.8b\n"
"add x23, x19, x13\n"
"add x24, x19, x7\n"
"sqrshl v28.4s, v28.4s, v2.4s\n"
"sqxtn v27.4h, v27.4s\n"
"umin v24.8b, v24.8b, v4.8b\n"
"str s24, [x23]\n"
"st1 { v24.s }[1], [x24]\n"
"sqxtn2 v27.8h, v28.4s\n"
"sqadd v24.8h, v27.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v3.8b\n"
"add x25, x24, #4\n" // =4
"umin v24.8b, v24.8b, v4.8b\n"
"add x21, x21, #1\n" // =1
"ushr v21.2d, v21.2d, #32\n"
"ushr v22.2d, v22.2d, #32\n"
"ushr v23.2d, v23.2d, #32\n"
"add x19, x23, x13\n"
"str s24, [x23, #4]\n"
"st1 { v24.s }[1], [x25]\n"
"add x20, x20, #4\n" // =4
DC_KERNEL_MULT_STRIDE_4 ":\n" // Parent Loop BB206_13 Depth=1
// => This Inner Loop Header: Depth=2
"cmp x21, x6\n"
"b.lt " DC_KERNEL_MULT_STRIDE_3 "b\n"
"b " DC_KERNEL_MULT_STRIDE_6 "f\n"
// Two-row residual-width path (loop bound x11 instead of x6).
DC_KERNEL_MULT_STRIDE_5 ":\n" // in Loop: Header=BB206_6 Depth=2
"and x22, x20, #0xfffffffc\n"
"add x22, x16, x22\n"
"lsl x23, x10, #2\n"
"mov x25, x22\n"
"add x24, x22, x17\n"
"ld1 { v21.s }[1], [x25], x23\n"
"ld1 { v22.s }[1], [x24]\n"
"add x23, x22, x10\n"
"add x22, x22, %[function_params]\n"
"ld1 { v21.s }[3], [x23]\n"
"ld1 { v22.s }[3], [x22]\n"
"mov v24.16b, v19.16b\n"
"ld1 { v23.s }[1], [x25]\n"
"mov v25.16b, v19.16b\n"
".word 0x4f95e0b8 // sdot v24.4s, v5.16b, v21.4b[0]\n"
".word 0x4f96e0b9 // sdot v25.4s, v5.16b, v22.4b[0]\n"
".word 0x4f95e8f8 // sdot v24.4s, v7.16b, v21.4b[2]\n"
".word 0x4f96e8f9 // sdot v25.4s, v7.16b, v22.4b[2]\n"
".word 0x4f96e238 // sdot v24.4s, v17.16b, v22.4b[0]\n"
".word 0x4f97e239 // sdot v25.4s, v17.16b, v23.4b[0]\n"
"sqrdmulh v24.4s, v24.4s, v1.4s\n"
"sqrdmulh v25.4s, v25.4s, v1.4s\n"
"sqrshl v24.4s, v24.4s, v2.4s\n"
"sqrshl v25.4s, v25.4s, v2.4s\n"
"sqxtn v24.4h, v24.4s\n"
"sqxtn2 v24.8h, v25.4s\n"
"sqadd v24.8h, v24.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v3.8b\n"
"add x22, x19, x9\n"
"mov v25.16b, v20.16b\n"
"umin v24.8b, v24.8b, v4.8b\n"
"str s24, [x19]\n"
"st1 { v24.s }[1], [x22]\n"
"mov v24.16b, v20.16b\n"
".word 0x4f95e0d9 // sdot v25.4s, v6.16b, v21.4b[0]\n"
".word 0x4f96e0d8 // sdot v24.4s, v6.16b, v22.4b[0]\n"
".word 0x4f95ea19 // sdot v25.4s, v16.16b, v21.4b[2]\n"
".word 0x4f96ea18 // sdot v24.4s, v16.16b, v22.4b[2]\n"
".word 0x4f96e259 // sdot v25.4s, v18.16b, v22.4b[0]\n"
".word 0x4f97e258 // sdot v24.4s, v18.16b, v23.4b[0]\n"
"sqrdmulh v25.4s, v25.4s, v1.4s\n"
"sqrdmulh v24.4s, v24.4s, v1.4s\n"
"sqrshl v25.4s, v25.4s, v2.4s\n"
"sqrshl v24.4s, v24.4s, v2.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqxtn2 v25.8h, v24.4s\n"
"sqadd v24.8h, v25.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v3.8b\n"
"add x22, x22, #4\n" // =4
"umin v24.8b, v24.8b, v4.8b\n"
"add x21, x21, #1\n" // =1
"ushr v21.2d, v21.2d, #16\n"
"ushr v22.2d, v22.2d, #16\n"
"ushr v23.2d, v23.2d, #16\n"
"str s24, [x19, #4]\n"
"st1 { v24.s }[1], [x22]\n"
"add x19, x19, x13\n"
"add x20, x20, #4\n" // =4
DC_KERNEL_MULT_STRIDE_6 ":\n" // Parent Loop BB206_13 Depth=1
// => This Inner Loop Header: Depth=2
"cmp x21, x11\n"
"b.lt " DC_KERNEL_MULT_STRIDE_5 "b\n"
"b " DC_KERNEL_MULT_STRIDE_12 "f\n"
// Single-row output path (block height != 2).
DC_KERNEL_MULT_STRIDE_7 ":\n" // in Loop: Header=BB206_13 Depth=1
"mov x21, xzr\n"
"dup v22.4s, v22.s[0]\n"
"b " DC_KERNEL_MULT_STRIDE_11 "f\n"
DC_KERNEL_MULT_STRIDE_8 ":\n" // in Loop: Header=BB206_11 Depth=2
"and x22, x20, #0xfffffffc\n"
"add x22, x16, x22\n"
"mov x23, x22\n"
"ld1 { v21.s }[1], [x23], x17\n"
"add x22, x22, x10\n"
"mov v23.16b, v19.16b\n"
"mov v24.16b, v20.16b\n"
"ld1 { v22.s }[1], [x23]\n"
"ld1 { v21.s }[3], [x22]\n"
"cmp w15, #2\n" // =2
"ccmp x5, x20, #0, ne\n"
".word 0x4f96e237 // sdot v23.4s, v17.16b, v22.4b[0]\n"
".word 0x4f96e258 // sdot v24.4s, v18.16b, v22.4b[0]\n"
".word 0x4f95e0b7 // sdot v23.4s, v5.16b, v21.4b[0]\n"
".word 0x4f95e0d8 // sdot v24.4s, v6.16b, v21.4b[0]\n"
".word 0x4f95e8f7 // sdot v23.4s, v7.16b, v21.4b[2]\n"
".word 0x4f95ea18 // sdot v24.4s, v16.16b, v21.4b[2]\n"
"sqrdmulh v23.4s, v23.4s, v1.4s\n"
"sqrdmulh v24.4s, v24.4s, v1.4s\n"
"sqrshl v23.4s, v23.4s, v2.4s\n"
"sqrshl v24.4s, v24.4s, v2.4s\n"
"sqxtn v25.4h, v23.4s\n"
"sqxtn2 v25.8h, v24.4s\n"
"sqadd v24.8h, v25.8h, v0.8h\n"
"sqxtun v24.8b, v24.8h\n"
"umax v24.8b, v24.8b, v3.8b\n"
"umin v24.8b, v24.8b, v4.8b\n"
"ushr v23.2d, v21.2d, #16\n"
"str d24, [x19]\n"
"ushr v24.2d, v22.2d, #16\n"
"add x19, x19, x13\n"
"b.eq " DC_KERNEL_MULT_STRIDE_10 "f\n"
// %bb.9: // in Loop: Header=BB206_11 Depth=2
"mov v25.16b, v19.16b\n"
"mov v26.16b, v20.16b\n"
".word 0x4f98e239 // sdot v25.4s, v17.16b, v24.4b[0]\n"
".word 0x4f98e25a // sdot v26.4s, v18.16b, v24.4b[0]\n"
".word 0x4f97e0b9 // sdot v25.4s, v5.16b, v23.4b[0]\n"
".word 0x4f97e0da // sdot v26.4s, v6.16b, v23.4b[0]\n"
".word 0x4f97e8f9 // sdot v25.4s, v7.16b, v23.4b[2]\n"
".word 0x4f97ea1a // sdot v26.4s, v16.16b, v23.4b[2]\n"
"ushr v23.2d, v21.2d, #32\n"
"sqrdmulh v21.4s, v25.4s, v1.4s\n"
"ushr v24.2d, v22.2d, #32\n"
"sqrdmulh v22.4s, v26.4s, v1.4s\n"
"sqrshl v21.4s, v21.4s, v2.4s\n"
"sqrshl v22.4s, v22.4s, v2.4s\n"
"sqxtn v21.4h, v21.4s\n"
"sqxtn2 v21.8h, v22.4s\n"
"sqadd v21.8h, v21.8h, v0.8h\n"
"sqxtun v21.8b, v21.8h\n"
"umax v21.8b, v21.8b, v3.8b\n"
"umin v21.8b, v21.8b, v4.8b\n"
"str d21, [x19]\n"
"add x19, x19, x13\n"
DC_KERNEL_MULT_STRIDE_10 ":\n" // in Loop: Header=BB206_11 Depth=2
"add x21, x21, #1\n" // =1
"add x20, x20, #4\n" // =4
"mov v22.16b, v24.16b\n"
"mov v21.16b, v23.16b\n"
DC_KERNEL_MULT_STRIDE_11 ":\n" // Parent Loop BB206_13 Depth=1
// => This Inner Loop Header: Depth=2
"cmp x21, x11\n"
"b.lt " DC_KERNEL_MULT_STRIDE_8 "b\n"
DC_KERNEL_MULT_STRIDE_12 ":\n" // in Loop: Header=BB206_13 Depth=1
"add x8, x8, #1\n" // =1
// Outer loop over depth micro-repeats: x8 counts up to
// depth_micro_repeats (x12, loaded from DP_OFFSET_DEPTH_MICRO_REPEATS).
DC_KERNEL_MULT_STRIDE_13 ":\n" // =>This Loop Header: Depth=1
// Child Loop BB206_11 Depth 2
// Child Loop BB206_4 Depth 2
// Child Loop BB206_6 Depth 2
"cmp x8, x12\n"
"b.lt " DC_KERNEL_MULT_STRIDE_1 "b\n"
:
// Outputs.
[ scratch_block_data ] "+r"(scratch_block_data),
[ filter_workspace ] "+r"(filter_workspace),
[ bias_data ] "+r"(bias_data),
[ output_block_data ] "+r"(output_block_data)
:
// Inputs.
[ function_params ] "r"(function_params)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
"v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
// We use these general-purpose registers.
"x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25");
#undef DC_KERNEL_MULT_STRIDE_1
#undef DC_KERNEL_MULT_STRIDE_2
#undef DC_KERNEL_MULT_STRIDE_3
#undef DC_KERNEL_MULT_STRIDE_4
#undef DC_KERNEL_MULT_STRIDE_5
#undef DC_KERNEL_MULT_STRIDE_6
#undef DC_KERNEL_MULT_STRIDE_7
#undef DC_KERNEL_MULT_STRIDE_8
#undef DC_KERNEL_MULT_STRIDE_9
#undef DC_KERNEL_MULT_STRIDE_10
#undef DC_KERNEL_MULT_STRIDE_11
#undef DC_KERNEL_MULT_STRIDE_12
#undef DC_KERNEL_MULT_STRIDE_13
}
// Thin dispatch wrapper over KernelMacroBlockNeon; marked noinline so
// the large asm body is not duplicated at call sites.  NOTE(review):
// rationale inferred from the attribute -- confirm.
static void __attribute__((noinline))
Run(const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
output_block_data, function_params);
}
};
// Macro-block kernel for the NEON dot-product (SDOT) depthwise-convolution
// path: per-channel int8 quantization, no depth multiplication, stride 1.
// The entire computation is a single hand-scheduled AArch64 inline-asm
// routine (originally derived from compiled intrinsics, then manually
// unrolled — see the NOLINT note at the end of the function).
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                        QuantizationType::kPerChannelInt8,
                        DepthwiseConvDepthMultiplication::kNoMultiplication,
                        /*stride=*/1> {
  // Processes one macro block: consumes pre-shuffled input from
  // |scratch_block_data| and pre-permuted filter data from
  // |filter_workspace|, accumulates with |bias_data|, requantizes using the
  // per-channel multiplier/shift arrays referenced through
  // |function_params| (sqrdmulh + sqrshl), applies the output offset and
  // the quantized activation min/max clamp, and stores int8 results into
  // |output_block_data|.  All loop trip counts and strides are read from
  // |function_params| via the DP_OFFSET_* field offsets.
  static inline void KernelMacroBlockNeon(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, int8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Note that argument registers may be reused after parameter loading.
    // x0 %[scratch_block_data]
    // x1 %[filter_workspace]
    // x2 %[bias_data]
    // x3 %[output_block_data]
    // x4 %[function_params]
    // Numeric local-label names used by the asm below ("1:", "1f", "1b",
    // etc.).  They are #undef'd after the asm statement so that adjacent
    // kernels in this file can reuse the same label numbers.
#define DC_KERNEL_NO_MULT_1 "1"
#define DC_KERNEL_NO_MULT_2 "2"
#define DC_KERNEL_NO_MULT_3 "3"
#define DC_KERNEL_NO_MULT_4 "4"
#define DC_KERNEL_NO_MULT_5 "5"
#define DC_KERNEL_NO_MULT_6 "6"
#define DC_KERNEL_NO_MULT_7 "7"
#define DC_KERNEL_NO_MULT_8 "8"
#define DC_KERNEL_NO_MULT_9 "9"
#define DC_KERNEL_NO_MULT_10 "10"
#define DC_KERNEL_NO_MULT_11 "11"
#define DC_KERNEL_NO_MULT_12 "12"
#define DC_KERNEL_NO_MULT_13 "13"
#define DC_KERNEL_NO_MULT_14 "14"
#define DC_KERNEL_NO_MULT_15 "15"
#define DC_KERNEL_NO_MULT_16 "16"
#define DC_KERNEL_NO_MULT_17 "17"
#define DC_KERNEL_NO_MULT_18 "18"
#define DC_KERNEL_NO_MULT_19 "19"
#define DC_KERNEL_NO_MULT_20 "20"
#define DC_KERNEL_NO_MULT_21 "21"
#define DC_KERNEL_NO_MULT_22 "22"
#define DC_KERNEL_NO_MULT_23 "23"
#define DC_KERNEL_NO_MULT_24 "24"
#define DC_KERNEL_NO_MULT_25 "25"
#define DC_KERNEL_NO_MULT_26 "26"
#define DC_KERNEL_NO_MULT_27 "27"
#define DC_KERNEL_NO_MULT_28 "28"
#define DC_KERNEL_NO_MULT_29 "29"
#define DC_KERNEL_NO_MULT_30 "30"
#define DC_KERNEL_NO_MULT_31 "31"
#define DC_KERNEL_NO_MULT_32 "32"
#define DC_KERNEL_NO_MULT_33 "33"
    // The ".word 0x4e..9..." directives below are hand-encoded SDOT
    // instructions (int8 dot-product accumulate) for assemblers that lack
    // the mnemonic; the trailing comment on each line gives the decoding.
    asm volatile(
        // Compiled code used block of 384 for spill out of total stack of 528.
        "sub sp, sp, #384\n"  // =528
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
        "str %[scratch_block_data], [sp, #376]\n"  // 8-byte Folded Spill
        "cmp w8, #1\n"  // =1
        "str x8, [sp, #56]\n"  // 8-byte Folded Spill
        "b.lt " DC_KERNEL_NO_MULT_33 "f\n"
        // %bb.1:
        "stp xzr, xzr, [sp, #72]\n"  // 16-byte Folded Spill
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
        "str xzr, [sp, #88]\n"  // 8-byte Folded Spill
        "ldpsw x22, x5, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
        "ldr x11, [%[function_params], #" STR(DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL) "]\n"
        "str w8, [sp, #340]\n"  // 4-byte Folded Spill
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS) "]\n"
        "ldrb w9, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "]\n"
        "str x11, [sp, #40]\n"  // 8-byte Folded Spill
        "ldr x11, [%[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT_PER_CHANNEL) "]\n"
        "str w8, [sp, #344]\n"  // 4-byte Folded Spill
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
        "ldrsw x7, [%[function_params]]\n"
        "str x11, [sp, #32]\n"  // 8-byte Folded Spill
        "ldrsw x11, [%[function_params], #" STR(DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
        "str w8, [sp, #348]\n"  // 4-byte Folded Spill
        "ldrb w8, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "]\n"
        "ldr x26, [sp, #376]\n"  // 8-byte Folded Reload
        "mov x23, %[output_block_data]\n"
        "add x10, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n"  // =28
        "dup v5.16b, w8\n"
        "fmov s3, w8\n"
        "lsl x8, x11, #5\n"
        "dup v6.16b, w9\n"
        "fmov s4, w9\n"
        "str x8, [sp, #48]\n"  // 8-byte Folded Spill
        "add x8, x5, x26\n"
        "lsl x9, x7, #1\n"
        "ld1r { v0.8h }, [x10]\n"
        "add x13, x5, x5, lsl #1\n"
        "add x10, x22, x7\n"
        "add x28, x8, #32\n"  // =32
        "add x8, x23, x9\n"
        "str x13, [sp, #312]\n"  // 8-byte Folded Spill
        "add x13, x13, x26\n"
        "str x8, [sp, #360]\n"  // 8-byte Folded Spill
        "add x8, x23, x10\n"
        "str x8, [sp, #352]\n"  // 8-byte Folded Spill
        "add x8, x13, #32\n"  // =32
        "ldr w6, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
        "lsl x12, x5, #2\n"
        "add x11, x5, x5, lsl #2\n"
        "add x24, x22, x22, lsl #1\n"
        "str x8, [sp, #368]\n"  // 8-byte Folded Spill
        "lsl x8, x5, #1\n"
        "mov %[output_block_data], %[filter_workspace]\n"
        "lsl %[filter_workspace], x22, #1\n"
        "stp x11, x12, [sp, #296]\n"  // 16-byte Folded Spill
        "add x11, x11, x26\n"
        "add x12, x12, x26\n"
        "add x14, x9, x7\n"
        "add x15, x9, x24\n"
        "stp x8, x5, [sp, #320]\n"  // 16-byte Folded Spill
        "add x8, x8, x26\n"
        "add x10, x11, #32\n"  // =32
        "add x11, x12, #32\n"  // =32
        "add x19, x8, #32\n"  // =32
        "add x12, x14, x24\n"
        "add x13, x14, %[filter_workspace]\n"
        "add x8, x14, x22\n"
        "add x25, x23, x14\n"
        "add x14, x23, x15\n"
        "add x17, x9, x22\n"
        "mov %[scratch_block_data], x19\n"
        "mov x19, x14\n"
        "add x14, x24, x7\n"
        "add x21, x23, x17\n"
        "mov w17, w6\n"
        "add x15, x23, x14\n"
        "add x14, %[filter_workspace], x7\n"
        "add x6, x23, x12\n"
        "add x12, x23, x13\n"
        "add %[function_params], x23, x14\n"
        "mov x14, x12\n"
        "and w12, w17, #0xfffffffe\n"
        "str w12, [sp, #20]\n"  // 4-byte Folded Spill
        "lsl x12, x7, #2\n"
        "str x12, [sp, #152]\n"  // 8-byte Folded Spill
        "add x12, x23, x22\n"
        "str x12, [sp, #144]\n"  // 8-byte Folded Spill
        "add x12, x23, x7\n"
        "add x16, x9, %[filter_workspace]\n"
        "str x12, [sp, #136]\n"  // 8-byte Folded Spill
        "add x12, x23, %[filter_workspace]\n"
        "dup v7.8b, v3.b[0]\n"
        "dup v14.8b, v4.b[0]\n"
        "add x20, x23, x16\n"
        "mov x13, x15\n"
        "add x15, x23, x8\n"
        "mov x5, %[filter_workspace]\n"
        "str x12, [sp, #128]\n"  // 8-byte Folded Spill
        "mov x8, x24\n"
        "add x12, x23, x24\n"
        "mov w1, #4\n"
        "stp x23, x12, [sp, #112]\n"  // 16-byte Folded Spill
        "str x26, [sp, #264]\n"  // 8-byte Folded Spill
        "str x22, [sp, #200]\n"  // 8-byte Folded Spill
        "str w17, [sp, #108]\n"  // 4-byte Folded Spill
        "str %[scratch_block_data], [sp, #96]\n"  // 8-byte Folded Spill
        "str x23, [sp, #24]\n"  // 8-byte Folded Spill
        "stp d14, d7, [sp, #160]\n"  // 16-byte Folded Spill
        "b " DC_KERNEL_NO_MULT_4 "f\n"
        DC_KERNEL_NO_MULT_2 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "mov %[bias_data], x9\n"
        DC_KERNEL_NO_MULT_3 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "ldr %[output_block_data], [sp, #48]\n"  // 8-byte Folded Reload
        "ldr x12, [sp, #264]\n"  // 8-byte Folded Reload
        "ldr x17, [sp, #88]\n"  // 8-byte Folded Reload
        "add x12, x12, %[output_block_data]\n"
        "str x12, [sp, #264]\n"  // 8-byte Folded Spill
        "ldr x12, [sp, #112]\n"  // 8-byte Folded Reload
        "add x17, x17, #1\n"  // =1
        "add x12, x12, #8\n"  // =8
        "str x12, [sp, #112]\n"  // 8-byte Folded Spill
        "ldr x12, [sp, #72]\n"  // 8-byte Folded Reload
        "add x12, x12, %[output_block_data]\n"
        "str x12, [sp, #72]\n"  // 8-byte Folded Spill
        "ldp x12, %[output_block_data], [sp, #56]\n"  // 16-byte Folded Reload
        "cmp x17, x12\n"
        "ldr x12, [sp, #80]\n"  // 8-byte Folded Reload
        "add x12, x12, #8\n"  // =8
        "stp x12, x17, [sp, #80]\n"  // 16-byte Folded Spill
        "ldr w17, [sp, #108]\n"  // 4-byte Folded Reload
        "b.eq " DC_KERNEL_NO_MULT_33 "f\n"
        DC_KERNEL_NO_MULT_4 ":\n"  // =>This Loop Header: Depth=1
        // Child Loop BB111_29 Depth 2
        // Child Loop BB111_32 Depth 2
        // Child Loop BB111_20 Depth 2
        // Child Loop BB111_22 Depth 3
        // Child Loop BB111_25 Depth 4
        // Child Loop BB111_7 Depth 2
        // Child Loop BB111_9 Depth 3
        // Child Loop BB111_15 Depth 3
        "ldp q16, q15, [%[output_block_data]]\n"
        "ldp q17, q3, [%[output_block_data], #32]\n"
        "ldp q18, q4, [%[output_block_data], #64]\n"
        "cmp w17, #4\n"  // =4
        "add %[output_block_data], x3, #96\n"  // =96
        "str %[output_block_data], [sp, #64]\n"  // 8-byte Folded Spill
        "b.ne " DC_KERNEL_NO_MULT_16 "f\n"
        // %bb.5: // in Loop: Header=BB111_4 Depth=1
        "ldp x24, x12, [sp, #80]\n"  // 16-byte Folded Reload
        "ldr x17, [sp, #32]\n"  // 8-byte Folded Reload
        "ldr x26, [sp, #72]\n"  // 8-byte Folded Reload
        "mov x9, xzr\n"
        "lsl w12, w12, #3\n"
        "lsl x12, x12, #2\n"
        "add x16, x17, x12\n"
        "ldr x17, [sp, #40]\n"  // 8-byte Folded Reload
        "stp q4, q3, [sp, #224]\n"  // 32-byte Folded Spill
        "str q15, [sp, #176]\n"  // 16-byte Folded Spill
        "add x12, x17, x12\n"
        "stp x12, x16, [sp, #208]\n"  // 16-byte Folded Spill
        "b " DC_KERNEL_NO_MULT_7 "f\n"
        DC_KERNEL_NO_MULT_6 ":\n"  // in Loop: Header=BB111_7 Depth=2
        "ldp q18, q17, [sp, #224]\n"  // 32-byte Folded Reload
        "add x9, x9, #1\n"  // =1
        "add x26, x26, #16\n"  // =16
        "cmp x9, #2\n"  // =2
        "add x24, x24, #4\n"  // =4
        "mov v16.16b, v15.16b\n"
        "b.eq " DC_KERNEL_NO_MULT_3 "b\n"
        DC_KERNEL_NO_MULT_7 ":\n"  // Parent Loop BB111_4 Depth=1
        // => This Loop Header: Depth=2
        // Child Loop BB111_9 Depth 3
        // Child Loop BB111_15 Depth 3
        "ldr q19, [%[bias_data]], #16\n"
        "ldr x16, [sp, #264]\n"  // 8-byte Folded Reload
        "lsl x12, x9, #4\n"
        "ldr w17, [sp, #344]\n"  // 4-byte Folded Reload
        "mov v31.16b, v19.16b\n"
        "add %[output_block_data], x16, x12\n"
        "ldr x16, [sp, #216]\n"  // 8-byte Folded Reload
        "ldr q22, [%[output_block_data]]\n"
        "mov v8.16b, v19.16b\n"
        "mov v9.16b, v19.16b\n"
        "ldr q20, [x16, x12]\n"
        "ldr x16, [sp, #208]\n"  // 8-byte Folded Reload
        "mov v10.16b, v19.16b\n"
        "cmp w17, #1\n"  // =1
        "ldr q21, [x16, x12]\n"
        "ldr x12, [sp, #328]\n"  // 8-byte Folded Reload
        "ldr q27, [%[output_block_data], x12]\n"
        "ldr x12, [sp, #320]\n"  // 8-byte Folded Reload
        "ldr q26, [%[output_block_data], x12]\n"
        "ldr x12, [sp, #312]\n"  // 8-byte Folded Reload
        ".word 0x4e9a965f  // sdot   v31.4s, v18.16b, v26.16b\n"
        "ldr q25, [%[output_block_data], x12]\n"
        "ldr x12, [sp, #304]\n"  // 8-byte Folded Reload
        ".word 0x4e9a9628  // sdot   v8.4s, v17.16b, v26.16b\n"
        ".word 0x4e9a9609  // sdot   v9.4s, v16.16b, v26.16b\n"
        ".word 0x4e99960a  // sdot   v10.4s, v16.16b, v25.16b\n"
        "ldr q24, [%[output_block_data], x12]\n"
        "ldr x12, [sp, #296]\n"  // 8-byte Folded Reload
        "ldr q23, [%[output_block_data], x12]\n"
        "b.lt " DC_KERNEL_NO_MULT_11 "f\n"
        // %bb.8: // in Loop: Header=BB111_7 Depth=2
        "stp x24, x9, [sp, #280]\n"  // 16-byte Folded Spill
        "ldr w12, [sp, #344]\n"  // 4-byte Folded Reload
        "mov x17, x24\n"
        "str x26, [sp, #272]\n"  // 8-byte Folded Spill
        "mov x22, x26\n"
        "ldp x27, x24, [sp, #144]\n"  // 16-byte Folded Reload
        "ldp x26, %[filter_workspace], [sp, #128]\n"  // 16-byte Folded Reload
        "ldr x16, [sp, #120]\n"  // 8-byte Folded Reload
        "shl v28.4s, v16.4s, #8\n"
        "shl v29.4s, v17.4s, #8\n"
        "shl v30.4s, v18.4s, #8\n"
        "mov v11.16b, v23.16b\n"
        "mov v12.16b, v24.16b\n"
        "mov v13.16b, v27.16b\n"
        "mov v14.16b, v22.16b\n"
        DC_KERNEL_NO_MULT_9 ":\n"  // Parent Loop BB111_4 Depth=1
        // Parent Loop BB111_7 Depth=2
        // => This Inner Loop Header: Depth=3
        ".word 0x4e8e961f  // sdot   v31.4s, v16.16b, v14.16b\n"
        ".word 0x4e8d9608  // sdot   v8.4s, v16.16b, v13.16b\n"
        ".word 0x4e999629  // sdot   v9.4s, v17.16b, v25.16b\n"
        ".word 0x4e8d963f  // sdot   v31.4s, v17.16b, v13.16b\n"
        ".word 0x4e8c962a  // sdot   v10.4s, v17.16b, v12.16b\n"
        ".word 0x4e999648  // sdot   v8.4s, v18.16b, v25.16b\n"
        ".word 0x4e8c9649  // sdot   v9.4s, v18.16b, v12.16b\n"
        "sqrdmulh v31.4s, v31.4s, v21.4s\n"
        ".word 0x4e8b964a  // sdot   v10.4s, v18.16b, v11.16b\n"
        "sqrdmulh v8.4s, v8.4s, v21.4s\n"
        "sqrdmulh v9.4s, v9.4s, v21.4s\n"
        "sqrshl v31.4s, v31.4s, v20.4s\n"
        "sqrdmulh v10.4s, v10.4s, v21.4s\n"
        "sqrshl v8.4s, v8.4s, v20.4s\n"
        "sqrshl v9.4s, v9.4s, v20.4s\n"
        "sqxtn v31.4h, v31.4s\n"
        "sqrshl v10.4s, v10.4s, v20.4s\n"
        "sqxtn v9.4h, v9.4s\n"
        "sqxtn2 v31.8h, v8.4s\n"
        "sqxtn2 v9.8h, v10.4s\n"
        "sqadd v31.8h, v31.8h, v0.8h\n"
        "sqadd v8.8h, v9.8h, v0.8h\n"
        "sqxtn v31.8b, v31.8h\n"
        "sqxtn2 v31.16b, v8.8h\n"
        "smax v31.16b, v31.16b, v5.16b\n"
        "add %[output_block_data], x27, x17\n"
        "smin v31.16b, v31.16b, v6.16b\n"
        "str s31, [x23, x17]\n"
        "st1 { v31.s }[1], [%[output_block_data]]\n"
        "add %[output_block_data], x26, x17\n"
        "st1 { v31.s }[2], [%[output_block_data]]\n"
        "add %[output_block_data], x16, x17\n"
        "st1 { v31.s }[3], [%[output_block_data]]\n"
        "ldr %[output_block_data], [sp, #376]\n"  // 8-byte Folded Reload
        "mov v10.16b, v19.16b\n"
        "mov v31.16b, v19.16b\n"
        "mov v8.16b, v19.16b\n"
        "ldr x9, [sp, #352]\n"  // 8-byte Folded Reload
        ".word 0x4e99978a  // sdot   v10.4s, v28.16b, v25.16b\n"
        ".word 0x4e8e979f  // sdot   v31.4s, v28.16b, v14.16b\n"
        ".word 0x4e8d9788  // sdot   v8.4s, v28.16b, v13.16b\n"
        ".word 0x4e8c97aa  // sdot   v10.4s, v29.16b, v12.16b\n"
        "mov v9.16b, v19.16b\n"
        ".word 0x4e8d97bf  // sdot   v31.4s, v29.16b, v13.16b\n"
        ".word 0x4e9a97a8  // sdot   v8.4s, v29.16b, v26.16b\n"
        ".word 0x4e8b97ca  // sdot   v10.4s, v30.16b, v11.16b\n"
        "add %[output_block_data], x3, x22\n"
        "rev32 v2.8h, v26.8h\n"
        ".word 0x4e9a9789  // sdot   v9.4s, v28.16b, v26.16b\n"
        ".word 0x4e9a97df  // sdot   v31.4s, v30.16b, v26.16b\n"
        ".word 0x4e9997c8  // sdot   v8.4s, v30.16b, v25.16b\n"
        "sqrdmulh v26.4s, v10.4s, v21.4s\n"
        "rev32 v15.8h, v22.8h\n"
        "ldr q22, [%[output_block_data], #32]\n"
        "add %[output_block_data], x9, x17\n"
        "rev32 v4.8h, v24.8h\n"
        ".word 0x4e9997a9  // sdot   v9.4s, v29.16b, v25.16b\n"
        "sqrdmulh v24.4s, v8.4s, v21.4s\n"
        "sqrshl v8.4s, v26.4s, v20.4s\n"
        "ldr q26, [%[scratch_block_data], x22]\n"
        "mov x9, %[scratch_block_data]\n"
        "ldr %[scratch_block_data], [sp, #368]\n"  // 8-byte Folded Reload
        "mov v7.16b, v6.16b\n"
        "mov v6.16b, v5.16b\n"
        "rev32 v5.8h, v23.8h\n"
        ".word 0x4e8c97c9  // sdot   v9.4s, v30.16b, v12.16b\n"
        "sqrdmulh v23.4s, v31.4s, v21.4s\n"
        "rev32 v3.8h, v25.8h\n"
        "sqrdmulh v25.4s, v9.4s, v21.4s\n"
        "sqrshl v23.4s, v23.4s, v20.4s\n"
        "sqrshl v31.4s, v24.4s, v20.4s\n"
        "sqrshl v24.4s, v25.4s, v20.4s\n"
        "sqxtn v9.4h, v23.4s\n"
        "rev32 v1.8h, v27.8h\n"
        "sqxtn v10.4h, v24.4s\n"
        "ldr q27, [x28, x22]\n"
        "ldr q25, [%[scratch_block_data], x22]\n"
        "ldr q24, [x11, x22]\n"
        "ldr q23, [x10, x22]\n"
        "sqxtn2 v9.8h, v31.4s\n"
        "sqxtn2 v10.8h, v8.4s\n"
        "sqadd v31.8h, v9.8h, v0.8h\n"
        "sqadd v8.8h, v10.8h, v0.8h\n"
        "sqxtn v31.8b, v31.8h\n"
        "sqxtn2 v31.16b, v8.8h\n"
        "smax v31.16b, v31.16b, v6.16b\n"
        "smin v31.16b, v31.16b, v7.16b\n"
        "str s31, [%[filter_workspace], x17]\n"
        "st1 { v31.s }[1], [%[output_block_data]]\n"
        "add %[output_block_data], %[function_params], x17\n"
        "st1 { v31.s }[2], [%[output_block_data]]\n"
        "add %[output_block_data], x13, x17\n"
        "mov v8.16b, v19.16b\n"
        "st1 { v31.s }[3], [%[output_block_data]]\n"
        "trn1 v31.8h, v15.8h, v22.8h\n"
        "mov v9.16b, v19.16b\n"
        "mov v10.16b, v19.16b\n"
        "trn1 v1.8h, v1.8h, v27.8h\n"
        "trn1 v2.8h, v2.8h, v26.8h\n"
        ".word 0x4e9f9608  // sdot   v8.4s, v16.16b, v31.16b\n"
        "mov v11.16b, v19.16b\n"
        "trn1 v3.8h, v3.8h, v25.8h\n"
        ".word 0x4e819609  // sdot   v9.4s, v16.16b, v1.16b\n"
        ".word 0x4e82960a  // sdot   v10.4s, v16.16b, v2.16b\n"
        ".word 0x4e819628  // sdot   v8.4s, v17.16b, v1.16b\n"
        "trn1 v4.8h, v4.8h, v24.8h\n"
        ".word 0x4e83960b  // sdot   v11.4s, v16.16b, v3.16b\n"
        ".word 0x4e829629  // sdot   v9.4s, v17.16b, v2.16b\n"
        ".word 0x4e83962a  // sdot   v10.4s, v17.16b, v3.16b\n"
        ".word 0x4e829648  // sdot   v8.4s, v18.16b, v2.16b\n"
        "trn1 v5.8h, v5.8h, v23.8h\n"
        ".word 0x4e84962b  // sdot   v11.4s, v17.16b, v4.16b\n"
        ".word 0x4e839649  // sdot   v9.4s, v18.16b, v3.16b\n"
        ".word 0x4e84964a  // sdot   v10.4s, v18.16b, v4.16b\n"
        "sqrdmulh v8.4s, v8.4s, v21.4s\n"
        ".word 0x4e85964b  // sdot   v11.4s, v18.16b, v5.16b\n"
        "sqrdmulh v9.4s, v9.4s, v21.4s\n"
        "sqrdmulh v10.4s, v10.4s, v21.4s\n"
        "sqrshl v8.4s, v8.4s, v20.4s\n"
        "sqrdmulh v11.4s, v11.4s, v21.4s\n"
        "sqrshl v9.4s, v9.4s, v20.4s\n"
        "sqrshl v10.4s, v10.4s, v20.4s\n"
        "sqxtn v8.4h, v8.4s\n"
        "sqrshl v11.4s, v11.4s, v20.4s\n"
        "sqxtn v10.4h, v10.4s\n"
        "sqxtn2 v8.8h, v9.4s\n"
        "sqxtn2 v10.8h, v11.4s\n"
        "sqadd v8.8h, v8.8h, v0.8h\n"
        "sqadd v9.8h, v10.8h, v0.8h\n"
        "sqxtn v8.8b, v8.8h\n"
        "sqxtn2 v8.16b, v9.8h\n"
        "mov v9.16b, v19.16b\n"
        "ldr %[scratch_block_data], [sp, #360]\n"  // 8-byte Folded Reload
        "mov v10.16b, v19.16b\n"
        "mov v11.16b, v19.16b\n"
        ".word 0x4e9f9789  // sdot   v9.4s, v28.16b, v31.16b\n"
        "mov v12.16b, v19.16b\n"
        ".word 0x4e81978a  // sdot   v10.4s, v28.16b, v1.16b\n"
        ".word 0x4e82978b  // sdot   v11.4s, v28.16b, v2.16b\n"
        ".word 0x4e8197a9  // sdot   v9.4s, v29.16b, v1.16b\n"
        "smax v8.16b, v8.16b, v6.16b\n"
        ".word 0x4e83978c  // sdot   v12.4s, v28.16b, v3.16b\n"
        ".word 0x4e8297aa  // sdot   v10.4s, v29.16b, v2.16b\n"
        ".word 0x4e8397ab  // sdot   v11.4s, v29.16b, v3.16b\n"
        ".word 0x4e8297c9  // sdot   v9.4s, v30.16b, v2.16b\n"
        "add %[output_block_data], x21, x17\n"
        "smin v8.16b, v8.16b, v7.16b\n"
        ".word 0x4e8497ac  // sdot   v12.4s, v29.16b, v4.16b\n"
        ".word 0x4e8397ca  // sdot   v10.4s, v30.16b, v3.16b\n"
        ".word 0x4e8497cb  // sdot   v11.4s, v30.16b, v4.16b\n"
        "sqrdmulh v1.4s, v9.4s, v21.4s\n"
        "str s8, [%[scratch_block_data], x17]\n"
        "st1 { v8.s }[1], [%[output_block_data]]\n"
        "add %[output_block_data], x20, x17\n"
        ".word 0x4e8597cc  // sdot   v12.4s, v30.16b, v5.16b\n"
        "sqrdmulh v2.4s, v10.4s, v21.4s\n"
        "sqrdmulh v3.4s, v11.4s, v21.4s\n"
        "sqrshl v1.4s, v1.4s, v20.4s\n"
        "st1 { v8.s }[2], [%[output_block_data]]\n"
        "add %[output_block_data], x19, x17\n"
        "sqrdmulh v4.4s, v12.4s, v21.4s\n"
        "sqrshl v2.4s, v2.4s, v20.4s\n"
        "sqrshl v3.4s, v3.4s, v20.4s\n"
        "sqxtn v1.4h, v1.4s\n"
        "st1 { v8.s }[3], [%[output_block_data]]\n"
        "sqrshl v4.4s, v4.4s, v20.4s\n"
        "sqxtn v3.4h, v3.4s\n"
        "sqxtn2 v1.8h, v2.4s\n"
        "sqxtn2 v3.8h, v4.4s\n"
        "sqadd v1.8h, v1.8h, v0.8h\n"
        "sqadd v2.8h, v3.8h, v0.8h\n"
        "sqxtn v1.8b, v1.8h\n"
        "mov v5.16b, v6.16b\n"
        "sqxtn2 v1.16b, v2.8h\n"
        "smax v1.16b, v1.16b, v5.16b\n"
        "add %[output_block_data], x15, x17\n"
        "smin v1.16b, v1.16b, v7.16b\n"
        "str s1, [x25, x17]\n"
        "st1 { v1.s }[1], [%[output_block_data]]\n"
        "add %[output_block_data], x14, x17\n"
        "mov v31.16b, v19.16b\n"
        "mov v8.16b, v19.16b\n"
        "mov v9.16b, v19.16b\n"
        "mov v10.16b, v19.16b\n"
        "mov %[scratch_block_data], x9\n"
        "mov v6.16b, v7.16b\n"
        "st1 { v1.s }[2], [%[output_block_data]]\n"
        "add %[output_block_data], x6, x17\n"
        "subs w12, w12, #1\n"  // =1
        "add x22, x22, #32\n"  // =32
        ".word 0x4e9a965f  // sdot   v31.4s, v18.16b, v26.16b\n"
        ".word 0x4e9a9628  // sdot   v8.4s, v17.16b, v26.16b\n"
        ".word 0x4e9a9609  // sdot   v9.4s, v16.16b, v26.16b\n"
        ".word 0x4e99960a  // sdot   v10.4s, v16.16b, v25.16b\n"
        "add x17, x17, x24\n"
        "mov v11.16b, v23.16b\n"
        "mov v12.16b, v24.16b\n"
        "mov v13.16b, v27.16b\n"
        "mov v14.16b, v22.16b\n"
        "st1 { v1.s }[3], [%[output_block_data]]\n"
        "b.ne " DC_KERNEL_NO_MULT_9 "b\n"
        // %bb.10: // in Loop: Header=BB111_7 Depth=2
        "ldr x12, [sp, #376]\n"  // 8-byte Folded Reload
        "ldp d14, d7, [sp, #160]\n"  // 16-byte Folded Reload
        "ldr q15, [sp, #176]\n"  // 16-byte Folded Reload
        "ldp x24, x9, [sp, #280]\n"  // 16-byte Folded Reload
        "add %[output_block_data], x12, x22\n"
        "ldr x22, [sp, #200]\n"  // 8-byte Folded Reload
        "ldr x26, [sp, #272]\n"  // 8-byte Folded Reload
        "add x12, x23, x17\n"
        "mov w1, #4\n"
        "ldr w17, [sp, #348]\n"  // 4-byte Folded Reload
        "cmp w17, #0\n"  // =0
        "b.gt " DC_KERNEL_NO_MULT_12 "f\n"
        "b " DC_KERNEL_NO_MULT_6 "b\n"
        DC_KERNEL_NO_MULT_11 ":\n"  // in Loop: Header=BB111_7 Depth=2
        "ldr x12, [sp, #112]\n"  // 8-byte Folded Reload
        "add x12, x12, x9, lsl #2\n"
        "ldr w17, [sp, #348]\n"  // 4-byte Folded Reload
        "cmp w17, #0\n"  // =0
        "b.le " DC_KERNEL_NO_MULT_6 "b\n"
        DC_KERNEL_NO_MULT_12 ":\n"  // in Loop: Header=BB111_7 Depth=2
        "ldr w17, [sp, #348]\n"  // 4-byte Folded Reload
        "movi v28.16b, #0\n"
        "movi v29.16b, #0\n"
        "movi v30.16b, #0\n"
        "cmp w17, #3\n"  // =3
        "movi v11.16b, #0\n"
        "movi v12.16b, #0\n"
        "movi v13.16b, #0\n"
        "b.lt " DC_KERNEL_NO_MULT_14 "f\n"
        // %bb.13: // in Loop: Header=BB111_7 Depth=2
        "add x17, %[output_block_data], #32\n"  // =32
        "ldp x16, %[output_block_data], [sp, #320]\n"  // 16-byte Folded Reload
        "ldr q13, [x17]\n"
        "ldr %[scratch_block_data], [sp, #96]\n"  // 8-byte Folded Reload
        "ldr q12, [x17, %[output_block_data]]\n"
        "ldr %[output_block_data], [sp, #312]\n"  // 8-byte Folded Reload
        "ldr q11, [x17, x16]\n"
        "ldr q30, [x17, %[output_block_data]]\n"
        "ldr %[output_block_data], [sp, #304]\n"  // 8-byte Folded Reload
        "ldr q29, [x17, %[output_block_data]]\n"
        "ldr %[output_block_data], [sp, #296]\n"  // 8-byte Folded Reload
        "ldr q28, [x17, %[output_block_data]]\n"
        DC_KERNEL_NO_MULT_14 ":\n"  // in Loop: Header=BB111_7 Depth=2
        "ldr w17, [sp, #348]\n"  // 4-byte Folded Reload
        DC_KERNEL_NO_MULT_15 ":\n"  // Parent Loop BB111_4 Depth=1
        // Parent Loop BB111_7 Depth=2
        // => This Inner Loop Header: Depth=3
        ".word 0x4e96961f  // sdot   v31.4s, v16.16b, v22.16b\n"
        ".word 0x4e9b9608  // sdot   v8.4s, v16.16b, v27.16b\n"
        ".word 0x4e999629  // sdot   v9.4s, v17.16b, v25.16b\n"
        ".word 0x4e9b963f  // sdot   v31.4s, v17.16b, v27.16b\n"
        ".word 0x4e98962a  // sdot   v10.4s, v17.16b, v24.16b\n"
        ".word 0x4e999648  // sdot   v8.4s, v18.16b, v25.16b\n"
        ".word 0x4e989649  // sdot   v9.4s, v18.16b, v24.16b\n"
        "sqrdmulh v1.4s, v31.4s, v21.4s\n"
        ".word 0x4e97964a  // sdot   v10.4s, v18.16b, v23.16b\n"
        "sqrdmulh v2.4s, v8.4s, v21.4s\n"
        "sqrdmulh v3.4s, v9.4s, v21.4s\n"
        "sqrshl v1.4s, v1.4s, v20.4s\n"
        "sqrdmulh v4.4s, v10.4s, v21.4s\n"
        "sqrshl v2.4s, v2.4s, v20.4s\n"
        "sqrshl v3.4s, v3.4s, v20.4s\n"
        "sqxtn v1.4h, v1.4s\n"
        "sqrshl v4.4s, v4.4s, v20.4s\n"
        "sqxtn v3.4h, v3.4s\n"
        "sqxtn2 v1.8h, v2.4s\n"
        "sqxtn2 v3.8h, v4.4s\n"
        "sqadd v1.8h, v1.8h, v0.8h\n"
        "sqadd v2.8h, v3.8h, v0.8h\n"
        "sqxtn v1.8b, v1.8h\n"
        "sqxtn2 v1.16b, v2.8h\n"
        "smax v1.16b, v1.16b, v5.16b\n"
        "add %[output_block_data], x12, x22\n"
        "smin v1.16b, v1.16b, v6.16b\n"
        "ushr v26.4s, v26.4s, #8\n"
        "ushr v25.4s, v25.4s, #8\n"
        "str s1, [x12]\n"
        "st1 { v1.s }[1], [%[output_block_data]]\n"
        "add %[output_block_data], x12, x5\n"
        "ushr v22.4s, v22.4s, #8\n"
        "ushr v27.4s, v27.4s, #8\n"
        "sli v26.4s, v11.4s, #24\n"
        "ushr v24.4s, v24.4s, #8\n"
        "ushr v23.4s, v23.4s, #8\n"
        "sli v25.4s, v30.4s, #24\n"
        "mov v31.16b, v19.16b\n"
        "mov v8.16b, v19.16b\n"
        "mov v9.16b, v19.16b\n"
        "mov v10.16b, v19.16b\n"
        "st1 { v1.s }[2], [%[output_block_data]]\n"
        "add %[output_block_data], x12, x8\n"
        "subs w17, w17, #1\n"  // =1
        "sli v22.4s, v13.4s, #24\n"
        "ushr v13.4s, v13.4s, #8\n"
        "ushr v11.4s, v11.4s, #8\n"
        "sli v27.4s, v12.4s, #24\n"
        "ushr v12.4s, v12.4s, #8\n"
        "ushr v30.4s, v30.4s, #8\n"
        "sli v24.4s, v29.4s, #24\n"
        "ushr v29.4s, v29.4s, #8\n"
        "sli v23.4s, v28.4s, #24\n"
        "ushr v28.4s, v28.4s, #8\n"
        ".word 0x4e9a965f  // sdot   v31.4s, v18.16b, v26.16b\n"
        ".word 0x4e9a9628  // sdot   v8.4s, v17.16b, v26.16b\n"
        ".word 0x4e9a9609  // sdot   v9.4s, v16.16b, v26.16b\n"
        "add x12, x12, x7\n"
        ".word 0x4e99960a  // sdot   v10.4s, v16.16b, v25.16b\n"
        "st1 { v1.s }[3], [%[output_block_data]]\n"
        "b.ne " DC_KERNEL_NO_MULT_15 "b\n"
        "b " DC_KERNEL_NO_MULT_6 "b\n"
        DC_KERNEL_NO_MULT_16 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "cmp w17, #1\n"  // =1
        "add x9, %[bias_data], #32\n"  // =32
        "b.lt " DC_KERNEL_NO_MULT_2 "b\n"
        // %bb.17: // in Loop: Header=BB111_4 Depth=1
        "ldr w12, [sp, #340]\n"  // 4-byte Folded Reload
        "cmp w12, #1\n"  // =1
        "b.lt " DC_KERNEL_NO_MULT_27 "f\n"
        // %bb.18: // in Loop: Header=BB111_4 Depth=1
        "ldr x12, [sp, #88]\n"  // 8-byte Folded Reload
        "ldp x17, %[output_block_data], [sp, #32]\n"  // 16-byte Folded Reload
        "str x9, [sp, #288]\n"  // 8-byte Folded Spill
        "ldp q19, q20, [%[bias_data]]\n"
        "lsl w12, w12, #3\n"
        "lsl x12, x12, #2\n"
        "add x17, x17, x12\n"
        "add x12, %[output_block_data], x12\n"
        "ldp q21, q22, [x17]\n"
        "ldp q23, q24, [x12]\n"
        "ldr x9, [sp, #264]\n"  // 8-byte Folded Reload
        "ldr x27, [sp, #112]\n"  // 8-byte Folded Reload
        "mov w26, wzr\n"
        "b " DC_KERNEL_NO_MULT_20 "f\n"
        DC_KERNEL_NO_MULT_19 ":\n"  // in Loop: Header=BB111_20 Depth=2
        "ldr w12, [sp, #108]\n"  // 4-byte Folded Reload
        "ldr x22, [sp, #200]\n"  // 8-byte Folded Reload
        "add w26, w26, #1\n"  // =1
        "cmp w26, w12\n"
        "add x27, x27, x22\n"
        "b.eq " DC_KERNEL_NO_MULT_26 "f\n"
        DC_KERNEL_NO_MULT_20 ":\n"  // Parent Loop BB111_4 Depth=1
        // => This Loop Header: Depth=2
        // Child Loop BB111_22 Depth 3
        // Child Loop BB111_25 Depth 4
        "ldp x16, %[output_block_data], [sp, #320]\n"  // 16-byte Folded Reload
        "ldp q25, q26, [x9]\n"
        "mov w12, wzr\n"
        "mov x17, x9\n"
        "add %[scratch_block_data], x9, %[output_block_data]\n"
        "add %[output_block_data], x9, x16\n"
        "ldp q27, q28, [%[scratch_block_data]]\n"
        "ldp q29, q30, [%[output_block_data]]\n"
        "mov x9, %[scratch_block_data]\n"
        "mov x22, x27\n"
        "b " DC_KERNEL_NO_MULT_22 "f\n"
        DC_KERNEL_NO_MULT_21 ":\n"  // in Loop: Header=BB111_22 Depth=3
        "ldr w16, [sp, #340]\n"  // 4-byte Folded Reload
        "add w12, w12, #1\n"  // =1
        "mov x17, %[scratch_block_data]\n"
        "cmp w12, w16\n"
        "b.eq " DC_KERNEL_NO_MULT_19 "b\n"
        DC_KERNEL_NO_MULT_22 ":\n"  // Parent Loop BB111_4 Depth=1
        // Parent Loop BB111_20 Depth=2
        // => This Loop Header: Depth=3
        // Child Loop BB111_25 Depth 4
        "ldr w16, [sp, #344]\n"  // 4-byte Folded Reload
        "add %[scratch_block_data], x17, #32\n"  // =32
        "cmp w12, w16\n"
        "ldr w16, [sp, #348]\n"  // 4-byte Folded Reload
        "csel w3, w16, w1, eq\n"
        "cmp w3, #3\n"  // =3
        "b.ge " DC_KERNEL_NO_MULT_24 "f\n"
        // %bb.23: // in Loop: Header=BB111_22 Depth=3
        "movi v31.16b, #0\n"
        "cmp w3, #1\n"  // =1
        "movi v8.16b, #0\n"
        "movi v9.16b, #0\n"
        "movi v11.16b, #0\n"
        "movi v12.16b, #0\n"
        "movi v10.16b, #0\n"
        "b.ge " DC_KERNEL_NO_MULT_25 "f\n"
        "b " DC_KERNEL_NO_MULT_21 "b\n"
        DC_KERNEL_NO_MULT_24 ":\n"  // in Loop: Header=BB111_22 Depth=3
        "ldr x24, [sp, #328]\n"  // 8-byte Folded Reload
        "mov x16, x11\n"
        "mov x11, x10\n"
        "mov x10, %[scratch_block_data]\n"
        "add x24, %[scratch_block_data], x24\n"
        "ldr %[scratch_block_data], [sp, #320]\n"  // 8-byte Folded Reload
        "ldp q10, q9, [x17, #32]\n"
        "ldp q12, q8, [x24]\n"
        "mov x23, x15\n"
        "add %[scratch_block_data], x10, x0\n"
        "ldp q11, q31, [%[scratch_block_data]]\n"
        "mov x15, x14\n"
        "mov x14, x6\n"
        "mov %[bias_data], x13\n"
        "mov x13, x21\n"
        "mov x21, x20\n"
        "mov x20, x19\n"
        "mov x19, x25\n"
        "mov x19, x20\n"
        "mov x20, x21\n"
        "mov x21, x13\n"
        "mov x13, %[bias_data]\n"
        "mov x14, x15\n"
        "mov x15, x23\n"
        "mov %[scratch_block_data], x10\n"
        "mov x10, x11\n"
        "mov x11, x16\n"
        DC_KERNEL_NO_MULT_25 ":\n"  // Parent Loop BB111_4 Depth=1
        // Parent Loop BB111_20 Depth=2
        // Parent Loop BB111_22 Depth=3
        // => This Inner Loop Header: Depth=4
        "mov v1.16b, v19.16b\n"
        "mov v2.16b, v20.16b\n"
        ".word 0x4e999601  // sdot   v1.4s, v16.16b, v25.16b\n"
        ".word 0x4e9a95e2  // sdot   v2.4s, v15.16b, v26.16b\n"
        ".word 0x4e9b9621  // sdot   v1.4s, v17.16b, v27.16b\n"
        ".word 0x4e9c9462  // sdot   v2.4s, v3.16b, v28.16b\n"
        ".word 0x4e9d9641  // sdot   v1.4s, v18.16b, v29.16b\n"
        ".word 0x4e9e9482  // sdot   v2.4s, v4.16b, v30.16b\n"
        "sqrdmulh v1.4s, v1.4s, v23.4s\n"
        "sqrdmulh v2.4s, v2.4s, v24.4s\n"
        "sqrshl v1.4s, v1.4s, v21.4s\n"
        "sqrshl v2.4s, v2.4s, v22.4s\n"
        "sqxtn v1.4h, v1.4s\n"
        "sqxtn2 v1.8h, v2.4s\n"
        "sqadd v1.8h, v1.8h, v0.8h\n"
        "sqxtn v1.8b, v1.8h\n"
        "smax v1.8b, v1.8b, v7.8b\n"
        "ushr v25.4s, v25.4s, #8\n"
        "ushr v26.4s, v26.4s, #8\n"
        "ushr v27.4s, v27.4s, #8\n"
        "ushr v28.4s, v28.4s, #8\n"
        "ushr v29.4s, v29.4s, #8\n"
        "ushr v30.4s, v30.4s, #8\n"
        "smin v1.8b, v1.8b, v14.8b\n"
        "subs w3, w3, #1\n"  // =1
        "sli v25.4s, v10.4s, #24\n"
        "ushr v10.4s, v10.4s, #8\n"
        "sli v26.4s, v9.4s, #24\n"
        "ushr v9.4s, v9.4s, #8\n"
        "sli v27.4s, v12.4s, #24\n"
        "ushr v12.4s, v12.4s, #8\n"
        "sli v28.4s, v8.4s, #24\n"
        "ushr v8.4s, v8.4s, #8\n"
        "sli v29.4s, v11.4s, #24\n"
        "ushr v11.4s, v11.4s, #8\n"
        "sli v30.4s, v31.4s, #24\n"
        "ushr v31.4s, v31.4s, #8\n"
        "str d1, [x22]\n"
        "add x22, x22, x7\n"
        "b.ne " DC_KERNEL_NO_MULT_25 "b\n"
        "b " DC_KERNEL_NO_MULT_21 "b\n"
        DC_KERNEL_NO_MULT_26 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "ldr %[bias_data], [sp, #288]\n"  // 8-byte Folded Reload
        "ldr x23, [sp, #24]\n"  // 8-byte Folded Reload
        "ldr %[scratch_block_data], [sp, #96]\n"  // 8-byte Folded Reload
        "b " DC_KERNEL_NO_MULT_3 "b\n"
        DC_KERNEL_NO_MULT_27 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "ldr w12, [sp, #20]\n"  // 4-byte Folded Reload
        "cmp w17, #2\n"  // =2
        "b.hs " DC_KERNEL_NO_MULT_29 "f\n"
        // %bb.28: // in Loop: Header=BB111_4 Depth=1
        "mov w12, wzr\n"
        "b " DC_KERNEL_NO_MULT_31 "f\n"
        DC_KERNEL_NO_MULT_29 ":\n"  // Parent Loop BB111_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "subs w12, w12, #2\n"  // =2
        "b.ne " DC_KERNEL_NO_MULT_29 "b\n"
        // %bb.30: // in Loop: Header=BB111_4 Depth=1
        "ldr w12, [sp, #20]\n"  // 4-byte Folded Reload
        "cmp w17, w12\n"
        "b.eq " DC_KERNEL_NO_MULT_2 "b\n"
        DC_KERNEL_NO_MULT_31 ":\n"  // in Loop: Header=BB111_4 Depth=1
        "sub w12, w17, w12\n"
        DC_KERNEL_NO_MULT_32 ":\n"  // Parent Loop BB111_4 Depth=1
        // => This Inner Loop Header: Depth=2
        "subs w12, w12, #1\n"  // =1
        "b.ne " DC_KERNEL_NO_MULT_32 "b\n"
        "b " DC_KERNEL_NO_MULT_2 "b\n"
        DC_KERNEL_NO_MULT_33 ":\n"
        // Compiled intrinsics total stack 528, now 384 for spillage only.
        "add sp, sp, #384\n"  // =528
        :
        // Outputs.
        [ scratch_block_data ] "+r"(scratch_block_data),
        [ filter_workspace ] "+r"(filter_workspace),
        [ bias_data ] "+r"(bias_data),
        [ output_block_data ] "+r"(output_block_data)
        :
        // Inputs.
        [ function_params ] "r"(function_params)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
        "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
        "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
        "x27", "x28");
    // Release the numeric label macros so neighbouring kernels can redefine
    // them with the same names.
#undef DC_KERNEL_NO_MULT_1
#undef DC_KERNEL_NO_MULT_2
#undef DC_KERNEL_NO_MULT_3
#undef DC_KERNEL_NO_MULT_4
#undef DC_KERNEL_NO_MULT_5
#undef DC_KERNEL_NO_MULT_6
#undef DC_KERNEL_NO_MULT_7
#undef DC_KERNEL_NO_MULT_8
#undef DC_KERNEL_NO_MULT_9
#undef DC_KERNEL_NO_MULT_10
#undef DC_KERNEL_NO_MULT_11
#undef DC_KERNEL_NO_MULT_12
#undef DC_KERNEL_NO_MULT_13
#undef DC_KERNEL_NO_MULT_14
#undef DC_KERNEL_NO_MULT_15
#undef DC_KERNEL_NO_MULT_16
#undef DC_KERNEL_NO_MULT_17
#undef DC_KERNEL_NO_MULT_18
#undef DC_KERNEL_NO_MULT_19
#undef DC_KERNEL_NO_MULT_20
#undef DC_KERNEL_NO_MULT_21
#undef DC_KERNEL_NO_MULT_22
#undef DC_KERNEL_NO_MULT_23
#undef DC_KERNEL_NO_MULT_24
#undef DC_KERNEL_NO_MULT_25
#undef DC_KERNEL_NO_MULT_26
#undef DC_KERNEL_NO_MULT_27
#undef DC_KERNEL_NO_MULT_28
#undef DC_KERNEL_NO_MULT_29
#undef DC_KERNEL_NO_MULT_30
#undef DC_KERNEL_NO_MULT_31
#undef DC_KERNEL_NO_MULT_32
#undef DC_KERNEL_NO_MULT_33
  }  // NOLINT(readability/fn_size) Manually unrolled.

  // Entry point used by the dispatch machinery; simply forwards to the asm
  // implementation above.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
                         output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
QuantizationType::kPerChannelInt8,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*stride=*/2> {
static inline void KernelMacroBlockNeon(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, int8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
// Note that argument registers may be reused after parameter loading.
// x0 %[scratch_block_data]
// x1 %[filter_workspace]
// x2 %[bias_data]
// x3 %[output_block_data]
// x4 %[function_params]
#define DC_KERNEL_NO_MULT_STRIDE_1 "1"
#define DC_KERNEL_NO_MULT_STRIDE_2 "2"
#define DC_KERNEL_NO_MULT_STRIDE_3 "3"
#define DC_KERNEL_NO_MULT_STRIDE_4 "4"
#define DC_KERNEL_NO_MULT_STRIDE_5 "5"
#define DC_KERNEL_NO_MULT_STRIDE_6 "6"
#define DC_KERNEL_NO_MULT_STRIDE_7 "7"
#define DC_KERNEL_NO_MULT_STRIDE_8 "8"
#define DC_KERNEL_NO_MULT_STRIDE_9 "9"
#define DC_KERNEL_NO_MULT_STRIDE_10 "10"
#define DC_KERNEL_NO_MULT_STRIDE_11 "11"
#define DC_KERNEL_NO_MULT_STRIDE_12 "12"
#define DC_KERNEL_NO_MULT_STRIDE_13 "13"
#define DC_KERNEL_NO_MULT_STRIDE_14 "14"
#define DC_KERNEL_NO_MULT_STRIDE_15 "15"
#define DC_KERNEL_NO_MULT_STRIDE_16 "16"
#define DC_KERNEL_NO_MULT_STRIDE_17 "17"
#define DC_KERNEL_NO_MULT_STRIDE_18 "18"
#define DC_KERNEL_NO_MULT_STRIDE_19 "19"
#define DC_KERNEL_NO_MULT_STRIDE_20 "20"
#define DC_KERNEL_NO_MULT_STRIDE_21 "21"
#define DC_KERNEL_NO_MULT_STRIDE_22 "22"
#define DC_KERNEL_NO_MULT_STRIDE_23 "23"
#define DC_KERNEL_NO_MULT_STRIDE_24 "24"
#define DC_KERNEL_NO_MULT_STRIDE_25 "25"
#define DC_KERNEL_NO_MULT_STRIDE_26 "26"
#define DC_KERNEL_NO_MULT_STRIDE_27 "27"
#define DC_KERNEL_NO_MULT_STRIDE_28 "28"
#define DC_KERNEL_NO_MULT_STRIDE_29 "29"
#define DC_KERNEL_NO_MULT_STRIDE_30 "30"
#define DC_KERNEL_NO_MULT_STRIDE_31 "31"
#define DC_KERNEL_NO_MULT_STRIDE_32 "32"
#define DC_KERNEL_NO_MULT_STRIDE_33 "33"
#define DC_KERNEL_NO_MULT_STRIDE_34 "34"
#define DC_KERNEL_NO_MULT_STRIDE_35 "35"
asm volatile(
// Compiled code used block of 176 for spill out of total stack of 320.
"sub sp, sp, #176\n" // =320
"ldr w23, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
"str %[scratch_block_data], [sp, #168]\n" // 8-byte Folded Spill
"cmp w23, #1\n" // =1
"b.lt " DC_KERNEL_NO_MULT_STRIDE_35 "f\n"
// %bb.1:
"ldr x8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL) "]\n"
"ldpsw x11, x12, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
"ldp w13, w0, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"ldr w5, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
"str x8, [sp, #144]\n" // 8-byte Folded Spill
"ldr x8, [%[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT_PER_CHANNEL) "]\n"
"ldr x14, [%[function_params]]\n"
"str w5, [sp, #164]\n" // 4-byte Folded Spill
"add x15, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "\n" // =40
"str x8, [sp, #136]\n" // 8-byte Folded Spill
"add x16, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "\n" // =44
"add x17, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n" // =28
"ldrsw x8, [%[function_params], #" STR(DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"ldp w5, w4, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
"ld1r { v0.8h }, [x17]\n"
"ld1r { v1.8b }, [x15]\n"
"ld1r { v2.8b }, [x16]\n"
"cmp w5, #1\n" // =1
"ccmp w0, w13, #0, eq\n"
"lsl w15, w14, #1\n"
"csel w6, w0, w13, lt\n"
"lsl x8, x8, #5\n"
"sxtw x19, w14\n"
"sxtw x22, w15\n"
"bic w14, w6, w6, asr #31\n"
"str x8, [sp, #152]\n" // 8-byte Folded Spill
"lsl x7, x12, #1\n"
"madd x8, x22, x14, %[output_block_data]\n"
"mov x9, xzr\n"
"mov x10, xzr\n"
"lsl x20, x12, #2\n"
"add x21, x7, x12\n"
"sub x14, x13, x14\n"
"stp x8, x23, [sp, #48]\n" // 16-byte Folded Spill
"add x8, x8, #4\n" // =4
"str w4, [sp, #44]\n" // 4-byte Folded Spill
"str %[scratch_block_data], [sp, #32]\n" // 8-byte Folded Spill
"str x14, [sp, #128]\n" // 8-byte Folded Spill
"str x8, [sp, #8]\n" // 8-byte Folded Spill
// implicit-def: $q5
// implicit-def: $q21
// implicit-def: $q19
// implicit-def: $q16
// implicit-def: $q20
// implicit-def: $q3
// implicit-def: $q11
// implicit-def: $q13
// implicit-def: $q14
// implicit-def: $q15
// implicit-def: $q6
"b " DC_KERNEL_NO_MULT_STRIDE_4 "f\n"
DC_KERNEL_NO_MULT_STRIDE_2 ":\n" // in Loop: Header=BB112_4 Depth=1
"add x27, %[bias_data], #32\n" // =32
"mov v19.16b, v12.16b\n"
"mov v3.16b, v9.16b\n"
"mov v5.16b, v10.16b\n"
"mov v20.16b, v7.16b\n"
DC_KERNEL_NO_MULT_STRIDE_3 ":\n" // in Loop: Header=BB112_4 Depth=1
"add x10, x10, #1\n" // =1
"cmp x10, x23\n"
"add x9, x9, #8\n" // =8
"mov %[bias_data], x27\n"
"b.eq " DC_KERNEL_NO_MULT_STRIDE_35 "f\n"
DC_KERNEL_NO_MULT_STRIDE_4 ":\n" // =>This Loop Header: Depth=1
// Child Loop BB112_30 Depth 2
// Child Loop BB112_21 Depth 2
// Child Loop BB112_7 Depth 2
// Child Loop BB112_9 Depth 2
// Child Loop BB112_12 Depth 2
// Child Loop BB112_26 Depth 2
"ldr w8, [sp, #164]\n" // 4-byte Folded Reload
"add w14, w10, w10, lsl #1\n"
"lsl w14, w14, #5\n"
"add x26, %[filter_workspace], x14\n"
"cmp w8, #2\n" // =2
"ldr x8, [sp, #168]\n" // 8-byte Folded Reload
"ldr x14, [sp, #152]\n" // 8-byte Folded Reload
"nop\n"
"madd x28, x10, x14, x8\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_14 "f\n"
// %bb.5: // in Loop: Header=BB112_4 Depth=1
"ldr x8, [sp, #136]\n" // 8-byte Folded Reload
"ubfx x14, x9, #3, #29\n"
"lsl w15, w10, #3\n"
"lsl x27, x14, #3\n"
"lsl x14, x15, #2\n"
"add x24, x8, x14\n"
"ldr x8, [sp, #144]\n" // 8-byte Folded Reload
"ldr q22, [x26]\n"
"ldr q23, [x26, #32]\n"
"ldr q24, [x26, #64]\n"
"add x14, x8, x14\n"
"ldr x8, [sp, #48]\n" // 8-byte Folded Reload
"ldr q25, [%[bias_data]]\n"
"ldr q31, [x28]\n"
"ldr q8, [x28, x12]\n"
"ldr q30, [x28, x7]\n"
"ldr q29, [x28, x21]\n"
"ldr q26, [x24]\n"
"ldr q27, [x14]\n"
"ldr q28, [x28, x20]\n"
"add x25, x8, x27\n"
"cmp w6, #1\n" // =1
"add %[function_params], %[output_block_data], x15\n"
"mov v12.16b, v19.16b\n"
"mov v7.16b, v20.16b\n"
"b.lt " DC_KERNEL_NO_MULT_STRIDE_23 "f\n"
// %bb.6: // in Loop: Header=BB112_4 Depth=1
"mov v4.16b, v21.16b\n"
"mov x8, %[filter_workspace]\n"
"mov w15, wzr\n"
"mov x16, xzr\n"
"add x17, x28, #32\n" // =32
"mov x23, x6\n"
"mov v17.16b, v30.16b\n"
DC_KERNEL_NO_MULT_STRIDE_7 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v18.16b, v25.16b\n"
"mov v19.16b, v25.16b\n"
".word 0x4e9f96d2 // sdot v18.4s, v22.16b, v31.16b\n"
".word 0x4e9196d3 // sdot v19.4s, v22.16b, v17.16b\n"
".word 0x4e8896f2 // sdot v18.4s, v23.16b, v8.16b\n"
".word 0x4e9d96f3 // sdot v19.4s, v23.16b, v29.16b\n"
".word 0x4e919712 // sdot v18.4s, v24.16b, v17.16b\n"
".word 0x4e9c9713 // sdot v19.4s, v24.16b, v28.16b\n"
"sqrdmulh v18.4s, v18.4s, v27.4s\n"
"and %[scratch_block_data], x16, #0xffffffe0\n"
"sqrdmulh v19.4s, v19.4s, v27.4s\n"
"sqrshl v18.4s, v18.4s, v26.4s\n"
"add %[scratch_block_data], x17, x0\n"
"sqrshl v19.4s, v19.4s, v26.4s\n"
"sqxtn v18.4h, v18.4s\n"
"rev32 v20.8h, v31.8h\n"
"rev32 v21.8h, v8.8h\n"
"rev32 v9.8h, v30.8h\n"
"rev32 v10.8h, v29.8h\n"
"ldr q31, [%[scratch_block_data]]\n"
"ldr q8, [%[scratch_block_data], x12]\n"
"ldr q30, [%[scratch_block_data], x7]\n"
"ldr q29, [%[scratch_block_data], x21]\n"
"rev32 v17.8h, v28.8h\n"
"ldr q28, [%[scratch_block_data], x20]\n"
"sqxtn2 v18.8h, v19.4s\n"
"sqadd v18.8h, v18.8h, v0.8h\n"
"sqxtn v18.8b, v18.8h\n"
"add %[filter_workspace], %[function_params], w15, sxtw\n"
"smax v18.8b, v18.8b, v1.8b\n"
"add %[scratch_block_data], %[filter_workspace], x11\n"
"smin v18.8b, v18.8b, v2.8b\n"
"mov v11.16b, v25.16b\n"
"str s18, [%[filter_workspace]]\n"
"st1 { v18.s }[1], [%[scratch_block_data]]\n"
"trn1 v18.8h, v20.8h, v31.8h\n"
"mov v19.16b, v25.16b\n"
"trn1 v20.8h, v21.8h, v8.8h\n"
"trn1 v21.8h, v9.8h, v30.8h\n"
".word 0x4e9296cb // sdot v11.4s, v22.16b, v18.16b\n"
"trn1 v9.8h, v10.8h, v29.8h\n"
".word 0x4e9596d3 // sdot v19.4s, v22.16b, v21.16b\n"
".word 0x4e9496eb // sdot v11.4s, v23.16b, v20.16b\n"
"trn1 v17.8h, v17.8h, v28.8h\n"
".word 0x4e8996f3 // sdot v19.4s, v23.16b, v9.16b\n"
".word 0x4e95970b // sdot v11.4s, v24.16b, v21.16b\n"
".word 0x4e919713 // sdot v19.4s, v24.16b, v17.16b\n"
"sqrdmulh v17.4s, v11.4s, v27.4s\n"
"sqrdmulh v18.4s, v19.4s, v27.4s\n"
"sqrshl v17.4s, v17.4s, v26.4s\n"
"sqrshl v18.4s, v18.4s, v26.4s\n"
"sqxtn v17.4h, v17.4s\n"
"sqxtn2 v17.8h, v18.4s\n"
"sqadd v17.8h, v17.8h, v0.8h\n"
"sqxtn v17.8b, v17.8h\n"
"add %[filter_workspace], x1, x19\n"
"smax v17.8b, v17.8b, v1.8b\n"
"add %[scratch_block_data], %[filter_workspace], x11\n"
"smin v17.8b, v17.8b, v2.8b\n"
"add x16, x16, #32\n" // =32
"subs x23, x23, #1\n" // =1
"str s17, [%[filter_workspace]]\n"
"st1 { v17.s }[1], [%[scratch_block_data]]\n"
"add w15, w15, w22\n"
"mov v17.16b, v30.16b\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_7 "b\n"
// %bb.8: // in Loop: Header=BB112_4 Depth=1
"mov v6.16b, v31.16b\n"
"mov v15.16b, v8.16b\n"
"mov v14.16b, v30.16b\n"
"mov v13.16b, v29.16b\n"
"mov v11.16b, v28.16b\n"
"mov w15, w6\n"
"mov %[filter_workspace], x8\n"
"mov v21.16b, v4.16b\n"
"cmp w15, w13\n"
"ldr x15, [sp, #128]\n" // 8-byte Folded Reload
"b.ge " DC_KERNEL_NO_MULT_STRIDE_10 "f\n"
DC_KERNEL_NO_MULT_STRIDE_9 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v9.16b, v25.16b\n"
"mov v10.16b, v25.16b\n"
".word 0x4e9f96c9 // sdot v9.4s, v22.16b, v31.16b\n"
".word 0x4e8896e9 // sdot v9.4s, v23.16b, v8.16b\n"
".word 0x4e9e96ca // sdot v10.4s, v22.16b, v30.16b\n"
".word 0x4e9e9709 // sdot v9.4s, v24.16b, v30.16b\n"
".word 0x4e9d96ea // sdot v10.4s, v23.16b, v29.16b\n"
".word 0x4e9c970a // sdot v10.4s, v24.16b, v28.16b\n"
"sqrdmulh v9.4s, v9.4s, v27.4s\n"
"sqrdmulh v10.4s, v10.4s, v27.4s\n"
"sqrshl v9.4s, v9.4s, v26.4s\n"
"sqrshl v10.4s, v10.4s, v26.4s\n"
"sqxtn v9.4h, v9.4s\n"
"sqxtn2 v9.8h, v10.4s\n"
"sqadd v9.8h, v9.8h, v0.8h\n"
"sqxtn v9.8b, v9.8h\n"
"smax v9.8b, v9.8b, v1.8b\n"
"rev32 v31.8h, v31.8h\n"
"rev32 v8.8h, v8.8h\n"
"rev32 v30.8h, v30.8h\n"
"rev32 v29.8h, v29.8h\n"
"rev32 v28.8h, v28.8h\n"
"smin v9.8b, v9.8b, v2.8b\n"
"add x16, x25, x11\n"
"subs x15, x15, #1\n" // =1
"trn1 v31.8h, v31.8h, v6.8h\n"
"trn1 v8.8h, v8.8h, v15.8h\n"
"trn1 v29.8h, v29.8h, v13.8h\n"
"trn1 v30.8h, v30.8h, v14.8h\n"
"trn1 v28.8h, v28.8h, v11.8h\n"
"str s9, [x25]\n"
"add x25, x25, x22\n"
"st1 { v9.s }[1], [x16]\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_9 "b\n"
DC_KERNEL_NO_MULT_STRIDE_10 ":\n" // in Loop: Header=BB112_4 Depth=1
"ldr q22, [x26, #16]\n"
"ldr q23, [x26, #48]\n"
"ldr q24, [x26, #80]\n"
"ldr q29, [x28, #16]!\n"
"ldr q25, [%[bias_data], #16]\n"
"ldr q26, [x24, #16]\n"
"ldr q27, [x14, #16]\n"
"ldr q8, [x28, x12]\n"
"ldr q31, [x28, x7]\n"
"ldr q30, [x28, x21]\n"
"ldr q28, [x28, x20]\n"
"ldr x23, [sp, #56]\n" // 8-byte Folded Reload
"cmp w6, #0\n" // =0
"mov v10.16b, v5.16b\n"
"b.le " DC_KERNEL_NO_MULT_STRIDE_24 "f\n"
// %bb.11: // in Loop: Header=BB112_4 Depth=1
"mov v6.16b, v21.16b\n"
"mov v9.16b, v3.16b\n"
"mov w14, wzr\n"
"mov x15, xzr\n"
"add x16, x28, #32\n" // =32
"add x17, %[function_params], #4\n" // =4
"mov %[function_params], x6\n"
"mov v17.16b, v31.16b\n"
DC_KERNEL_NO_MULT_STRIDE_12 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v3.16b, v25.16b\n"
"mov v4.16b, v25.16b\n"
".word 0x4e9d96c3 // sdot v3.4s, v22.16b, v29.16b\n"
".word 0x4e9196c4 // sdot v4.4s, v22.16b, v17.16b\n"
".word 0x4e8896e3 // sdot v3.4s, v23.16b, v8.16b\n"
".word 0x4e9e96e4 // sdot v4.4s, v23.16b, v30.16b\n"
".word 0x4e919703 // sdot v3.4s, v24.16b, v17.16b\n"
".word 0x4e9c9704 // sdot v4.4s, v24.16b, v28.16b\n"
"sqrdmulh v3.4s, v3.4s, v27.4s\n"
"and %[scratch_block_data], x15, #0xffffffe0\n"
"sqrdmulh v4.4s, v4.4s, v27.4s\n"
"sqrshl v3.4s, v3.4s, v26.4s\n"
"add %[scratch_block_data], x16, x0\n"
"sqrshl v4.4s, v4.4s, v26.4s\n"
"sqxtn v3.4h, v3.4s\n"
"rev32 v5.8h, v29.8h\n"
"rev32 v18.8h, v8.8h\n"
"rev32 v19.8h, v31.8h\n"
"rev32 v20.8h, v30.8h\n"
"ldr q29, [%[scratch_block_data]]\n"
"ldr q8, [%[scratch_block_data], x12]\n"
"ldr q31, [%[scratch_block_data], x7]\n"
"ldr q30, [%[scratch_block_data], x21]\n"
"rev32 v17.8h, v28.8h\n"
"ldr q28, [%[scratch_block_data], x20]\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqxtn v3.8b, v3.8h\n"
"add x8, x17, w14, sxtw\n"
"smax v3.8b, v3.8b, v1.8b\n"
"add %[scratch_block_data], x8, x11\n"
"smin v3.8b, v3.8b, v2.8b\n"
"mov v21.16b, v25.16b\n"
"str s3, [x8]\n"
"st1 { v3.s }[1], [%[scratch_block_data]]\n"
"trn1 v3.8h, v5.8h, v29.8h\n"
"mov v4.16b, v25.16b\n"
"trn1 v5.8h, v18.8h, v8.8h\n"
"trn1 v18.8h, v19.8h, v31.8h\n"
".word 0x4e8396d5 // sdot v21.4s, v22.16b, v3.16b\n"
"trn1 v19.8h, v20.8h, v30.8h\n"
".word 0x4e9296c4 // sdot v4.4s, v22.16b, v18.16b\n"
".word 0x4e8596f5 // sdot v21.4s, v23.16b, v5.16b\n"
"trn1 v17.8h, v17.8h, v28.8h\n"
".word 0x4e9396e4 // sdot v4.4s, v23.16b, v19.16b\n"
".word 0x4e929715 // sdot v21.4s, v24.16b, v18.16b\n"
".word 0x4e919704 // sdot v4.4s, v24.16b, v17.16b\n"
"sqrdmulh v3.4s, v21.4s, v27.4s\n"
"sqrdmulh v4.4s, v4.4s, v27.4s\n"
"sqrshl v3.4s, v3.4s, v26.4s\n"
"sqrshl v4.4s, v4.4s, v26.4s\n"
"sqxtn v3.4h, v3.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqxtn v3.8b, v3.8h\n"
"add x8, x8, x19\n"
"smax v3.8b, v3.8b, v1.8b\n"
"add x15, x15, #32\n" // =32
"subs %[function_params], %[function_params], #1\n" // =1
"add %[scratch_block_data], x8, x11\n"
"smin v3.8b, v3.8b, v2.8b\n"
"add w14, w14, w22\n"
"mov v17.16b, v31.16b\n"
"str s3, [x8]\n"
"st1 { v3.s }[1], [%[scratch_block_data]]\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_12 "b\n"
// %bb.13: // in Loop: Header=BB112_4 Depth=1
"mov v15.16b, v8.16b\n"
"mov v14.16b, v31.16b\n"
"mov v13.16b, v30.16b\n"
"mov v11.16b, v28.16b\n"
"mov w14, w6\n"
"mov v21.16b, v6.16b\n"
"mov v6.16b, v29.16b\n"
"mov v3.16b, v29.16b\n"
"cmp w14, w13\n"
"b.ge " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
"b " DC_KERNEL_NO_MULT_STRIDE_25 "f\n"
DC_KERNEL_NO_MULT_STRIDE_14 ":\n" // in Loop: Header=BB112_4 Depth=1
"cmp w13, #1\n" // =1
"add x27, %[bias_data], #32\n" // =32
"b.lt " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
// %bb.15: // in Loop: Header=BB112_4 Depth=1
"ldr x8, [sp, #136]\n" // 8-byte Folded Reload
"lsl w14, w10, #3\n"
"stp q15, q14, [sp, #64]\n" // 32-byte Folded Spill
"stp q13, q11, [sp, #96]\n" // 32-byte Folded Spill
"add x15, x28, x12\n"
"lsl x16, x14, #2\n"
"ldp q10, q11, [x15]\n"
"add x15, x8, x16\n"
"ldr x8, [sp, #144]\n" // 8-byte Folded Reload
"ldp q30, q31, [x15]\n"
"add x15, x28, x7\n"
"ldp q22, q23, [x26]\n"
"add x16, x8, x16\n"
"ldr w8, [sp, #44]\n" // 4-byte Folded Reload
"ldp q24, q25, [x26, #32]\n"
"ldp q26, q27, [x26, #64]\n"
"ldp q17, q18, [%[bias_data]]\n"
"ldp q14, q13, [x28], #32\n"
"ldp q8, q9, [x16]\n"
"ldp q12, q15, [x15]\n"
"add %[bias_data], %[output_block_data], x14\n"
"cmp w13, w8\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_27 "f\n"
// %bb.16: // in Loop: Header=BB112_4 Depth=1
"ldr x25, [sp, #32]\n" // 8-byte Folded Reload
"mov x14, xzr\n"
"mov w4, wzr\n"
"mov x24, x13\n"
"cbnz x25, " DC_KERNEL_NO_MULT_STRIDE_20 "f\n"
"b " DC_KERNEL_NO_MULT_STRIDE_21 "f\n"
DC_KERNEL_NO_MULT_STRIDE_17 ":\n" // in Loop: Header=BB112_21 Depth=2
"mov v28.16b, v17.16b\n"
".word 0x4e8e96dc // sdot v28.4s, v22.16b, v14.16b\n"
"mov v29.16b, v18.16b\n"
".word 0x4e8d96fd // sdot v29.4s, v23.16b, v13.16b\n"
".word 0x4e8a971c // sdot v28.4s, v24.16b, v10.16b\n"
".word 0x4e8b973d // sdot v29.4s, v25.16b, v11.16b\n"
".word 0x4e8c975c // sdot v28.4s, v26.16b, v12.16b\n"
".word 0x4e8f977d // sdot v29.4s, v27.16b, v15.16b\n"
"sqrdmulh v28.4s, v28.4s, v8.4s\n"
"sqrdmulh v29.4s, v29.4s, v9.4s\n"
"sqrshl v28.4s, v28.4s, v30.4s\n"
"sqrshl v29.4s, v29.4s, v31.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqxtn v28.8b, v28.8h\n"
"smax v28.8b, v28.8b, v1.8b\n"
"smin v28.8b, v28.8b, v2.8b\n"
"mov v14.16b, v3.16b\n"
"mov v10.16b, v20.16b\n"
"mov v12.16b, v16.16b\n"
"mov v13.16b, v19.16b\n"
"mov v11.16b, v21.16b\n"
"mov v15.16b, v5.16b\n"
"str d28, [x15, x19]\n"
DC_KERNEL_NO_MULT_STRIDE_18 ":\n" // in Loop: Header=BB112_21 Depth=2
"add w4, w4, w22\n"
"add x14, x14, #32\n" // =32
"subs x24, x24, #1\n" // =1
"sub x25, x25, #1\n" // =1
"b.eq " DC_KERNEL_NO_MULT_STRIDE_33 "f\n"
// %bb.19: // in Loop: Header=BB112_21 Depth=2
"cbz x25, " DC_KERNEL_NO_MULT_STRIDE_21 "f\n"
DC_KERNEL_NO_MULT_STRIDE_20 ":\n" // in Loop: Header=BB112_4 Depth=1
"and x15, x14, #0xffffffe0\n"
"add x15, x28, x15\n"
"add x16, x15, x12\n"
"add x17, x15, x7\n"
"ldp q3, q19, [x15]\n"
"ldp q20, q21, [x16]\n"
"ldp q16, q5, [x17]\n"
DC_KERNEL_NO_MULT_STRIDE_21 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v28.16b, v17.16b\n"
"mov v29.16b, v18.16b\n"
".word 0x4e8e96dc // sdot v28.4s, v22.16b, v14.16b\n"
".word 0x4e8a971c // sdot v28.4s, v24.16b, v10.16b\n"
".word 0x4e8d96fd // sdot v29.4s, v23.16b, v13.16b\n"
".word 0x4e8c975c // sdot v28.4s, v26.16b, v12.16b\n"
".word 0x4e8b973d // sdot v29.4s, v25.16b, v11.16b\n"
".word 0x4e8f977d // sdot v29.4s, v27.16b, v15.16b\n"
"sqrdmulh v28.4s, v28.4s, v8.4s\n"
"sqrdmulh v29.4s, v29.4s, v9.4s\n"
"sqrshl v28.4s, v28.4s, v30.4s\n"
"sqrshl v29.4s, v29.4s, v31.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqxtn v28.8b, v28.8h\n"
"rev32 v14.8h, v14.8h\n"
"rev32 v10.8h, v10.8h\n"
"rev32 v12.8h, v12.8h\n"
"rev32 v13.8h, v13.8h\n"
"rev32 v11.8h, v11.8h\n"
"rev32 v15.8h, v15.8h\n"
"smax v28.8b, v28.8b, v1.8b\n"
"add x15, %[bias_data], w4, sxtw\n"
"cmp w5, #1\n" // =1
"trn1 v14.8h, v14.8h, v3.8h\n"
"trn1 v13.8h, v13.8h, v19.8h\n"
"trn1 v10.8h, v10.8h, v20.8h\n"
"trn1 v11.8h, v11.8h, v21.8h\n"
"trn1 v12.8h, v12.8h, v16.8h\n"
"smin v28.8b, v28.8b, v2.8b\n"
"trn1 v15.8h, v15.8h, v5.8h\n"
"str d28, [x15]\n"
"b.gt " DC_KERNEL_NO_MULT_STRIDE_17 "b\n"
// %bb.22: // in Loop: Header=BB112_21 Depth=2
"cbz x25, " DC_KERNEL_NO_MULT_STRIDE_18 "b\n"
"b " DC_KERNEL_NO_MULT_STRIDE_17 "b\n"
DC_KERNEL_NO_MULT_STRIDE_23 ":\n" // in Loop: Header=BB112_4 Depth=1
"mov w15, wzr\n"
"cmp w15, w13\n"
"ldr x15, [sp, #128]\n" // 8-byte Folded Reload
"b.lt " DC_KERNEL_NO_MULT_STRIDE_9 "b\n"
"b " DC_KERNEL_NO_MULT_STRIDE_10 "b\n"
DC_KERNEL_NO_MULT_STRIDE_24 ":\n" // in Loop: Header=BB112_4 Depth=1
"mov v9.16b, v3.16b\n"
"mov w14, wzr\n"
"cmp w14, w13\n"
"b.ge " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
DC_KERNEL_NO_MULT_STRIDE_25 ":\n" // in Loop: Header=BB112_4 Depth=1
"ldr x8, [sp, #8]\n" // 8-byte Folded Reload
"ldr x15, [sp, #128]\n" // 8-byte Folded Reload
"add x14, x8, x27\n"
DC_KERNEL_NO_MULT_STRIDE_26 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v3.16b, v25.16b\n"
"mov v4.16b, v25.16b\n"
".word 0x4e9d96c3 // sdot v3.4s, v22.16b, v29.16b\n"
".word 0x4e8896e3 // sdot v3.4s, v23.16b, v8.16b\n"
".word 0x4e9f96c4 // sdot v4.4s, v22.16b, v31.16b\n"
".word 0x4e9f9703 // sdot v3.4s, v24.16b, v31.16b\n"
".word 0x4e9e96e4 // sdot v4.4s, v23.16b, v30.16b\n"
".word 0x4e9c9704 // sdot v4.4s, v24.16b, v28.16b\n"
"sqrdmulh v3.4s, v3.4s, v27.4s\n"
"sqrdmulh v4.4s, v4.4s, v27.4s\n"
"sqrshl v3.4s, v3.4s, v26.4s\n"
"sqrshl v4.4s, v4.4s, v26.4s\n"
"sqxtn v3.4h, v3.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqxtn v3.8b, v3.8h\n"
"smax v3.8b, v3.8b, v1.8b\n"
"rev32 v5.8h, v29.8h\n"
"rev32 v17.8h, v8.8h\n"
"rev32 v18.8h, v31.8h\n"
"rev32 v19.8h, v30.8h\n"
"rev32 v20.8h, v28.8h\n"
"smin v3.8b, v3.8b, v2.8b\n"
"add x16, x14, x11\n"
"subs x15, x15, #1\n" // =1
"trn1 v29.8h, v5.8h, v6.8h\n"
"trn1 v8.8h, v17.8h, v15.8h\n"
"trn1 v30.8h, v19.8h, v13.8h\n"
"trn1 v31.8h, v18.8h, v14.8h\n"
"trn1 v28.8h, v20.8h, v11.8h\n"
"str s3, [x14]\n"
"add x14, x14, x22\n"
"st1 { v3.s }[1], [x16]\n"
"b.ne " DC_KERNEL_NO_MULT_STRIDE_26 "b\n"
"b " DC_KERNEL_NO_MULT_STRIDE_2 "b\n"
DC_KERNEL_NO_MULT_STRIDE_27 ":\n" // in Loop: Header=BB112_4 Depth=1
"ldr x25, [sp, #32]\n" // 8-byte Folded Reload
"mov w14, wzr\n"
"mov %[function_params], xzr\n"
"mov x24, x13\n"
"str q6, [sp, #16]\n" // 16-byte Folded Spill
"b " DC_KERNEL_NO_MULT_STRIDE_30 "f\n"
DC_KERNEL_NO_MULT_STRIDE_28 ":\n" // in Loop: Header=BB112_30 Depth=2
"mov v3.16b, v17.16b\n"
".word 0x4e8e96c3 // sdot v3.4s, v22.16b, v14.16b\n"
"mov v4.16b, v18.16b\n"
".word 0x4e8d96e4 // sdot v4.4s, v23.16b, v13.16b\n"
".word 0x4e8a9703 // sdot v3.4s, v24.16b, v10.16b\n"
".word 0x4e8b9724 // sdot v4.4s, v25.16b, v11.16b\n"
".word 0x4e8c9743 // sdot v3.4s, v26.16b, v12.16b\n"
".word 0x4e8f9764 // sdot v4.4s, v27.16b, v15.16b\n"
"sqrdmulh v3.4s, v3.4s, v8.4s\n"
"sqrdmulh v4.4s, v4.4s, v9.4s\n"
"sqrshl v3.4s, v3.4s, v30.4s\n"
"sqrshl v4.4s, v4.4s, v31.4s\n"
"sqxtn v3.4h, v3.4s\n"
"sqxtn2 v3.8h, v4.4s\n"
"sqadd v3.8h, v3.8h, v0.8h\n"
"sqxtn v3.8b, v3.8h\n"
"smax v3.8b, v3.8b, v1.8b\n"
"smin v3.8b, v3.8b, v2.8b\n"
"str d3, [x15, x19]\n"
"mov v3.16b, v6.16b\n"
"mov v14.16b, v6.16b\n"
"mov v10.16b, v20.16b\n"
"mov v12.16b, v16.16b\n"
"mov v13.16b, v19.16b\n"
"mov v11.16b, v21.16b\n"
"mov v15.16b, v5.16b\n"
DC_KERNEL_NO_MULT_STRIDE_29 ":\n" // in Loop: Header=BB112_30 Depth=2
"add %[function_params], %[function_params], #" STR(DP_OFFSET_OUTPUT_MULTIPLIER) "\n" // =32
"sub x25, x25, #1\n" // =1
"subs x24, x24, #1\n" // =1
"add w14, w14, w22\n"
"b.eq " DC_KERNEL_NO_MULT_STRIDE_34 "f\n"
DC_KERNEL_NO_MULT_STRIDE_30 ":\n" // Parent Loop BB112_4 Depth=1
// => This Inner Loop Header: Depth=2
"mov v28.16b, v17.16b\n"
"mov v29.16b, v18.16b\n"
".word 0x4e8e96dc // sdot v28.4s, v22.16b, v14.16b\n"
"and x16, %[function_params], #0xffffffe0\n"
".word 0x4e8d96fd // sdot v29.4s, v23.16b, v13.16b\n"
".word 0x4e8a971c // sdot v28.4s, v24.16b, v10.16b\n"
"add x16, x28, x16\n"
".word 0x4e8b973d // sdot v29.4s, v25.16b, v11.16b\n"
".word 0x4e8c975c // sdot v28.4s, v26.16b, v12.16b\n"
"rev32 v19.8h, v14.8h\n"
"rev32 v3.8h, v13.8h\n"
"ldp q14, q13, [x16]\n"
".word 0x4e8f977d // sdot v29.4s, v27.16b, v15.16b\n"
"sqrdmulh v28.4s, v28.4s, v8.4s\n"
"sqrdmulh v29.4s, v29.4s, v9.4s\n"
"sqrshl v28.4s, v28.4s, v30.4s\n"
"add x17, x16, x12\n"
"add x16, x16, x7\n"
"sqrshl v29.4s, v29.4s, v31.4s\n"
"sqxtn v28.4h, v28.4s\n"
"rev32 v21.8h, v12.8h\n"
"rev32 v4.8h, v11.8h\n"
"ldp q20, q11, [x17]\n"
"ldp q12, q5, [x16]\n"
"sqxtn2 v28.8h, v29.4s\n"
"mov v6.16b, v14.16b\n"
"trn1 v14.8h, v19.8h, v14.8h\n"
"mov v19.16b, v13.16b\n"
"trn1 v13.8h, v3.8h, v13.8h\n"
"sqadd v3.8h, v28.8h, v0.8h\n"
"sqxtn v3.8b, v3.8h\n"
"rev32 v16.8h, v10.8h\n"
"rev32 v7.8h, v15.8h\n"
"smax v3.8b, v3.8b, v1.8b\n"
"add x15, %[bias_data], w14, sxtw\n"
"cmp w5, #1\n" // =1
"trn1 v10.8h, v16.8h, v20.8h\n"
"mov v16.16b, v12.16b\n"
"trn1 v12.8h, v21.8h, v12.8h\n"
"mov v21.16b, v11.16b\n"
"trn1 v11.8h, v4.8h, v11.8h\n"
"smin v3.8b, v3.8b, v2.8b\n"
"trn1 v15.8h, v7.8h, v5.8h\n"
"str d3, [x15]\n"
"b.gt " DC_KERNEL_NO_MULT_STRIDE_28 "b\n"
// %bb.31: // in Loop: Header=BB112_30 Depth=2
"cbnz x25, " DC_KERNEL_NO_MULT_STRIDE_28 "b\n"
// %bb.32: // in Loop: Header=BB112_30 Depth=2
"mov v3.16b, v6.16b\n"
"b " DC_KERNEL_NO_MULT_STRIDE_29 "b\n"
DC_KERNEL_NO_MULT_STRIDE_33 ":\n" // in Loop: Header=BB112_4 Depth=1
"ldp q13, q11, [sp, #96]\n" // 32-byte Folded Reload
"ldp q15, q14, [sp, #64]\n" // 32-byte Folded Reload
"b " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
DC_KERNEL_NO_MULT_STRIDE_34 ":\n" // in Loop: Header=BB112_4 Depth=1
"ldp q13, q11, [sp, #96]\n" // 32-byte Folded Reload
"ldp q15, q14, [sp, #64]\n" // 32-byte Folded Reload
"ldr q6, [sp, #16]\n" // 16-byte Folded Reload
"b " DC_KERNEL_NO_MULT_STRIDE_3 "b\n"
DC_KERNEL_NO_MULT_STRIDE_35 ":\n"
// Compiled intrinsics total stack 320, now 176 for spillage only.
"add sp, sp, #176\n" // =320
:
// Outputs.
[ scratch_block_data ] "+r"(scratch_block_data),
[ filter_workspace ] "+r"(filter_workspace),
[ bias_data ] "+r"(bias_data),
[ output_block_data ] "+r"(output_block_data)
:
// Inputs.
[ function_params ] "r"(function_params)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20",
"v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
"v31",
// We use these general-purpose registers.
"x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"x27", "x28");
#undef DC_KERNEL_NO_MULT_STRIDE_1
#undef DC_KERNEL_NO_MULT_STRIDE_2
#undef DC_KERNEL_NO_MULT_STRIDE_3
#undef DC_KERNEL_NO_MULT_STRIDE_4
#undef DC_KERNEL_NO_MULT_STRIDE_5
#undef DC_KERNEL_NO_MULT_STRIDE_6
#undef DC_KERNEL_NO_MULT_STRIDE_7
#undef DC_KERNEL_NO_MULT_STRIDE_8
#undef DC_KERNEL_NO_MULT_STRIDE_9
#undef DC_KERNEL_NO_MULT_STRIDE_10
#undef DC_KERNEL_NO_MULT_STRIDE_11
#undef DC_KERNEL_NO_MULT_STRIDE_12
#undef DC_KERNEL_NO_MULT_STRIDE_13
#undef DC_KERNEL_NO_MULT_STRIDE_14
#undef DC_KERNEL_NO_MULT_STRIDE_15
#undef DC_KERNEL_NO_MULT_STRIDE_16
#undef DC_KERNEL_NO_MULT_STRIDE_17
#undef DC_KERNEL_NO_MULT_STRIDE_18
#undef DC_KERNEL_NO_MULT_STRIDE_19
#undef DC_KERNEL_NO_MULT_STRIDE_20
#undef DC_KERNEL_NO_MULT_STRIDE_21
#undef DC_KERNEL_NO_MULT_STRIDE_22
#undef DC_KERNEL_NO_MULT_STRIDE_23
#undef DC_KERNEL_NO_MULT_STRIDE_24
#undef DC_KERNEL_NO_MULT_STRIDE_25
#undef DC_KERNEL_NO_MULT_STRIDE_26
#undef DC_KERNEL_NO_MULT_STRIDE_27
#undef DC_KERNEL_NO_MULT_STRIDE_28
#undef DC_KERNEL_NO_MULT_STRIDE_29
#undef DC_KERNEL_NO_MULT_STRIDE_30
#undef DC_KERNEL_NO_MULT_STRIDE_31
#undef DC_KERNEL_NO_MULT_STRIDE_32
#undef DC_KERNEL_NO_MULT_STRIDE_33
#undef DC_KERNEL_NO_MULT_STRIDE_34
#undef DC_KERNEL_NO_MULT_STRIDE_35
} // NOLINT(readability/fn_size) Manually unrolled.
  // Entry point for this kernel specialization: forwards all arguments
  // unchanged to the hand-written AArch64 inline-assembly implementation
  // above. Kept as a thin wrapper so callers dispatch through a uniform
  // `Run` interface across all KernelMacroBlock specializations.
  //
  // scratch_block_data:  input data already rearranged into scratch layout.
  // filter_workspace:    filter values in workspace layout.
  // bias_data:           per-channel 32-bit bias values.
  // output_block_data:   destination for the quantized int8 output block.
  // function_params:     geometry/quantization parameters consumed by the
  //                      asm kernel via the DP_OFFSET_* field offsets.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
                         output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
QuantizationType::kPerChannelInt8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/1> {
static inline void KernelMacroBlockNeon(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, int8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
// Note that argument registers may be reused after parameter loading.
// x0 %[scratch_block_data]
// x1 %[filter_workspace]
// x2 %[bias_data]
// x3 %[output_block_data]
// x4 %[function_params]
#define DC_KERNEL_MULT_1 "1"
#define DC_KERNEL_MULT_2 "2"
#define DC_KERNEL_MULT_3 "3"
#define DC_KERNEL_MULT_4 "4"
#define DC_KERNEL_MULT_5 "5"
#define DC_KERNEL_MULT_6 "6"
#define DC_KERNEL_MULT_7 "7"
#define DC_KERNEL_MULT_8 "8"
#define DC_KERNEL_MULT_9 "9"
#define DC_KERNEL_MULT_10 "10"
#define DC_KERNEL_MULT_11 "11"
#define DC_KERNEL_MULT_12 "12"
#define DC_KERNEL_MULT_13 "13"
#define DC_KERNEL_MULT_14 "14"
#define DC_KERNEL_MULT_15 "15"
#define DC_KERNEL_MULT_16 "16"
#define DC_KERNEL_MULT_17 "17"
#define DC_KERNEL_MULT_18 "18"
#define DC_KERNEL_MULT_19 "19"
#define DC_KERNEL_MULT_20 "20"
#define DC_KERNEL_MULT_21 "21"
#define DC_KERNEL_MULT_22 "22"
#define DC_KERNEL_MULT_23 "23"
asm volatile(
// Compiled code used block of 336 for spill out of total stack of 448.
// However, an 8-byte spill was sneaked in to #344.
// Spillage increased to 352 and these are mapped to #336.
"sub sp, sp, #352\n" // =448
"ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
"str %[filter_workspace], [sp, #56]\n" // 8-byte Folded Spill
"cmp w8, #1\n" // =1
"str x8, [sp, #32]\n" // 8-byte Folded Spill
"b.lt " DC_KERNEL_MULT_23 "f\n"
// %bb.1:
"ldr w11, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
"ldr x12, [%[function_params], #" STR(DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL) "]\n"
"ldp w17, w15, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
"ldr w16, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
"ldpsw x21, x6, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
"ldrb w8, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "]\n"
"ldrb w9, [%[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "]\n"
"add x10, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n" // =28
"str x12, [sp, #24]\n" // 8-byte Folded Spill
"ldr x12, [%[function_params], #" STR(DP_OFFSET_OUTPUT_SHIFT_PER_CHANNEL) "]\n"
"ldrsw %[function_params], [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
"cmp w11, #4\n" // =4
"ccmp w15, w17, #0, lt\n"
"csel w25, w15, w17, lt\n"
"cmp w16, #1\n" // =1
"str x16, [sp, #80]\n" // 8-byte Folded Spill
"cset w16, lt\n"
"cmp w17, #1\n" // =1
"dup v1.16b, w8\n"
"fmov s3, w8\n"
"dup v2.16b, w9\n"
"fmov s4, w9\n"
"lsl x8, %[function_params], #1\n"
"add x9, x21, %[function_params]\n"
"str w17, [sp, #324]\n" // 4-byte Folded Spill
"cset w17, lt\n"
"ld1r { v0.8h }, [x10]\n"
"lsl x7, x21, #1\n"
"add x22, x21, x21, lsl #1\n"
"add x10, x8, %[function_params]\n"
"add x9, %[output_block_data], x9\n"
"orr w16, w16, w17\n"
"str x9, [sp, #216]\n" // 8-byte Folded Spill
"str w15, [sp, #316]\n" // 4-byte Folded Spill
"add x9, x10, x22\n"
"add x15, x10, x7\n"
"str w16, [sp, #12]\n" // 4-byte Folded Spill
"add x16, x10, x21\n"
"add x10, %[output_block_data], x10\n"
"str x10, [sp, #200]\n" // 8-byte Folded Spill
"add x10, x6, #4\n" // =4
"str x10, [sp, #160]\n" // 8-byte Folded Spill
"lsl x10, %[function_params], #2\n"
"str x10, [sp, #152]\n" // 8-byte Folded Spill
"add x10, %[output_block_data], x21\n"
"add x17, x6, x6, lsl #2\n"
"str x10, [sp, #144]\n" // 8-byte Folded Spill
"add x10, %[output_block_data], %[function_params]\n"
"lsl x24, x6, #2\n"
"str x10, [sp, #136]\n" // 8-byte Folded Spill
"add x10, x17, #4\n" // =4
"add x19, x6, x6, lsl #1\n"
"str x10, [sp, #128]\n" // 8-byte Folded Spill
"add x10, x24, #4\n" // =4
"str x12, [sp, #16]\n" // 8-byte Folded Spill
"str w11, [sp, #320]\n" // 4-byte Folded Spill
"lsl x20, x6, #1\n"
"add x11, x8, x22\n"
"add x12, x8, x7\n"
"add x13, x8, x21\n"
"add x8, %[output_block_data], x8\n"
"str x10, [sp, #120]\n" // 8-byte Folded Spill
"add x10, x19, #4\n" // =4
"stp x8, x7, [sp, #224]\n" // 16-byte Folded Spill
"add x8, x22, %[function_params]\n"
"str x10, [sp, #112]\n" // 8-byte Folded Spill
"add x10, x20, #4\n" // =4
"mov x5, xzr\n"
"add x14, x7, %[function_params]\n"
"add x8, %[output_block_data], x8\n"
"str x10, [sp, #104]\n" // 8-byte Folded Spill
"add x10, %[output_block_data], x7\n"
"add x26, %[output_block_data], x11\n"
"str x8, [sp, #184]\n" // 8-byte Folded Spill
"add x8, %[output_block_data], x14\n"
"mov x14, x5\n"
"add x5, %[output_block_data], x9\n"
"add x9, %[output_block_data], x16\n"
"mov x16, x22\n"
"stp x19, x6, [sp, #296]\n" // 16-byte Folded Spill
"mov x11, x7\n"
"str x20, [sp, #328]\n" // 8-byte Folded Spill
"str x10, [sp, #96]\n" // 8-byte Folded Spill
"add x10, %[output_block_data], x22\n"
"stp x22, %[output_block_data], [sp, #64]\n" // 16-byte Folded Spill
"ldr x7, [sp, #160]\n" // 8-byte Folded Reload
"ldr x23, [sp, #136]\n" // 8-byte Folded Reload
"ldp x22, x19, [sp, #112]\n" // 16-byte Folded Reload
"ldr x20, [sp, #104]\n" // 8-byte Folded Reload
"mov %[filter_workspace], xzr\n"
"dup v3.8b, v3.b[0]\n"
"dup v4.8b, v4.b[0]\n"
"add x27, %[output_block_data], x12\n"
"add x28, %[output_block_data], x13\n"
"mov x13, %[filter_workspace]\n"
"stp x8, x17, [sp, #168]\n" // 16-byte Folded Spill
"add x8, %[output_block_data], x15\n"
"str x10, [sp, #88]\n" // 8-byte Folded Spill
"mov w10, #4\n"
"stp x21, %[scratch_block_data], [sp, #256]\n" // 16-byte Folded Spill
"str w25, [sp, #212]\n" // 4-byte Folded Spill
"str x24, [sp, #192]\n" // 8-byte Folded Spill
"str x9, [sp, #336]\n" // 8-byte Folded Spill
"b " DC_KERNEL_MULT_5 "f\n"
DC_KERNEL_MULT_2 ":\n" // in Loop: Header=BB107_5 Depth=1
"mov %[output_block_data], x21\n"
"ldp x21, %[scratch_block_data], [sp, #256]\n" // 16-byte Folded Reload
DC_KERNEL_MULT_3 ":\n" // in Loop: Header=BB107_5 Depth=1
"mov %[bias_data], x11\n"
DC_KERNEL_MULT_4 ":\n" // in Loop: Header=BB107_5 Depth=1
"ldp x12, x14, [sp, #32]\n" // 16-byte Folded Reload
"ldr x11, [sp, #72]\n" // 8-byte Folded Reload
"ldr x13, [sp, #48]\n" // 8-byte Folded Reload
"add x14, x14, #1\n" // =1
"add x11, x11, #8\n" // =8
"cmp x14, x12\n"
"add x13, x13, #8\n" // =8
"str x11, [sp, #72]\n" // 8-byte Folded Spill
"b.eq " DC_KERNEL_MULT_23 "f\n"
DC_KERNEL_MULT_5 ":\n" // =>This Loop Header: Depth=1
// Child Loop BB107_19 Depth 2
// Child Loop BB107_21 Depth 3
// Child Loop BB107_22 Depth 4
// Child Loop BB107_8 Depth 2
// Child Loop BB107_10 Depth 3
// Child Loop BB107_14 Depth 3
"ldr x12, [sp, #56]\n" // 8-byte Folded Reload
"ldr x16, [sp, #80]\n" // 8-byte Folded Reload
"ldp q18, q5, [x12]\n"
"ldp q17, q6, [x12, #32]\n"
"ldp q16, q7, [x12, #64]\n"
"cmp w16, #4\n" // =4
"add x12, x12, #96\n" // =96
"stp x13, x12, [sp, #48]\n" // 16-byte Folded Spill
"str x14, [sp, #40]\n" // 8-byte Folded Spill
"b.ne " DC_KERNEL_MULT_16 "f\n"
// %bb.6: // in Loop: Header=BB107_5 Depth=1
"lsl w12, w14, #3\n"
"ldr x14, [sp, #16]\n" // 8-byte Folded Reload
"lsl x12, x12, #2\n"
"mov x15, xzr\n"
"mov %[filter_workspace], x13\n"
"add x11, x14, x12\n"
"ldr x14, [sp, #24]\n" // 8-byte Folded Reload
"str x11, [sp, #248]\n" // 8-byte Folded Spill
"add x11, x14, x12\n"
"str x11, [sp, #240]\n" // 8-byte Folded Spill
"b " DC_KERNEL_MULT_8 "f\n"
DC_KERNEL_MULT_7 ":\n" // in Loop: Header=BB107_8 Depth=2
"add x15, x15, #1\n" // =1
"cmp x15, #2\n" // =2
"add %[filter_workspace], x1, #4\n" // =4
"mov v16.16b, v7.16b\n"
"mov v17.16b, v6.16b\n"
"mov v18.16b, v5.16b\n"
"b.eq " DC_KERNEL_MULT_4 "b\n"
DC_KERNEL_MULT_8 ":\n" // Parent Loop BB107_5 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB107_10 Depth 3
// Child Loop BB107_14 Depth 3
"ldr q19, [%[bias_data]], #16\n"
"ldr x11, [sp, #248]\n" // 8-byte Folded Reload
"lsl x12, x15, #4\n"
"ldr w13, [%[scratch_block_data]]\n"
"ldr x16, [sp, #328]\n" // 8-byte Folded Reload
"ldr q20, [x11, x12]\n"
"ldr x11, [sp, #240]\n" // 8-byte Folded Reload
"ldr w6, [%[scratch_block_data], x24]\n"
"ldr w16, [%[scratch_block_data], x16]\n"
"ldr q21, [x11, x12]\n"
"ldp x12, x14, [sp, #296]\n" // 16-byte Folded Reload
"fmov s22, w13\n"
"add x14, %[scratch_block_data], x14\n"
"mov v22.s[1], w13\n"
"fmov s23, w6\n"
"ldr w12, [%[scratch_block_data], x12]\n"
"ld1 { v22.s }[2], [x14]\n"
"add x14, %[scratch_block_data], x17\n"
"mov v23.s[1], w6\n"
"ld1 { v23.s }[2], [x14]\n"
"fmov s24, w16\n"
"mov v24.s[1], w16\n"
"dup v25.4s, w16\n"
"mov v28.16b, v19.16b\n"
"mov v29.16b, v19.16b\n"
"mov v30.16b, v19.16b\n"
"dup v26.4s, w12\n"
"mov v31.16b, v19.16b\n"
"mov v24.s[2], w12\n"
"cmp w25, #1\n" // =1
".word 0x4e99961c // sdot v28.4s, v16.16b, v25.16b\n"
".word 0x4e99963d // sdot v29.4s, v17.16b, v25.16b\n"
".word 0x4e99965e // sdot v30.4s, v18.16b, v25.16b\n"
"mov v24.s[3], w16\n"
"mov v22.s[3], w13\n"
"mov v23.s[3], w6\n"
".word 0x4e9a965f // sdot v31.4s, v18.16b, v26.16b\n"
"b.lt " DC_KERNEL_MULT_15 "f\n"
// %bb.9: // in Loop: Header=BB107_8 Depth=2
"stp x15, %[bias_data], [sp, #280]\n" // 16-byte Folded Spill
"mov w13, w25\n"
"str %[filter_workspace], [sp, #272]\n" // 8-byte Folded Spill
"mov x16, %[filter_workspace]\n"
"mov x14, %[scratch_block_data]\n"
"ldp x25, %[scratch_block_data], [sp, #216]\n" // 16-byte Folded Reload
"mov x24, x28\n"
"mov x28, x27\n"
"ldr x27, [sp, #200]\n" // 8-byte Folded Reload
"ldr x17, [sp, #184]\n" // 8-byte Folded Reload
"mov x9, x8\n"
"mov x8, x5\n"
"ldr x5, [sp, #168]\n" // 8-byte Folded Reload
"ldp x15, x10, [sp, #144]\n" // 16-byte Folded Reload
"ldr %[bias_data], [sp, #128]\n" // 8-byte Folded Reload
"ldp %[filter_workspace], x11, [sp, #88]\n" // 16-byte Folded Reload
"shl v25.4s, v18.4s, #8\n"
"shl v26.4s, v17.4s, #8\n"
"shl v27.4s, v16.4s, #8\n"
"mov x21, %[output_block_data]\n"
DC_KERNEL_MULT_10 ":\n" // Parent Loop BB107_5 Depth=1
// Parent Loop BB107_8 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4f96e25c // sdot v28.4s, v18.16b, v22.4b[0]\n"
".word 0x4f96ea5d // sdot v29.4s, v18.16b, v22.4b[2]\n"
".word 0x4f98ea3e // sdot v30.4s, v17.16b, v24.4b[2]\n"
".word 0x4f96ea3c // sdot v28.4s, v17.16b, v22.4b[2]\n"
".word 0x4f97e23f // sdot v31.4s, v17.16b, v23.4b[0]\n"
".word 0x4f98ea1d // sdot v29.4s, v16.16b, v24.4b[2]\n"
".word 0x4f97e21e // sdot v30.4s, v16.16b, v23.4b[0]\n"
"sqrdmulh v28.4s, v28.4s, v21.4s\n"
".word 0x4f97ea1f // sdot v31.4s, v16.16b, v23.4b[2]\n"
"sqrdmulh v29.4s, v29.4s, v21.4s\n"
"sqrdmulh v30.4s, v30.4s, v21.4s\n"
"sqrshl v28.4s, v28.4s, v20.4s\n"
"sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqrshl v29.4s, v29.4s, v20.4s\n"
"sqrshl v30.4s, v30.4s, v20.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqrshl v31.4s, v31.4s, v20.4s\n"
"sqxtn v30.4h, v30.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqxtn v28.8b, v28.8h\n"
"sqxtn2 v28.16b, v29.8h\n"
"smax v28.16b, v28.16b, v1.16b\n"
"add %[output_block_data], x15, x16\n"
"smin v28.16b, v28.16b, v2.16b\n"
"add x6, x11, x16\n"
"str s28, [x21, x16]\n"
"st1 { v28.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], %[filter_workspace], x16\n"
"st1 { v28.s }[2], [x6]\n"
"st1 { v28.s }[3], [%[output_block_data]]\n"
"mov x12, x14\n"
"add x6, x14, x20\n"
"ldr w3, [x14, #4]!\n"
"ld1 { v24.s }[1], [x6]\n"
"add x6, x12, x19\n"
"ld1 { v23.s }[1], [x6]\n"
"mov v22.s[1], w3\n"
"add %[output_block_data], x12, x22\n"
"ld1 { v24.s }[3], [%[output_block_data]]\n"
"add %[output_block_data], x12, x7\n"
"ld1 { v22.s }[3], [%[output_block_data]]\n"
"add x12, x12, %[bias_data]\n"
"mov v28.16b, v19.16b\n"
"ld1 { v23.s }[3], [x12]\n"
"mov v29.16b, v19.16b\n"
"mov v30.16b, v19.16b\n"
".word 0x4f96e33c // sdot v28.4s, v25.16b, v22.4b[0]\n"
"mov v31.16b, v19.16b\n"
".word 0x4f98e33e // sdot v30.4s, v25.16b, v24.4b[0]\n"
".word 0x4f96eb3d // sdot v29.4s, v25.16b, v22.4b[2]\n"
".word 0x4f96eb5c // sdot v28.4s, v26.16b, v22.4b[2]\n"
".word 0x4f98eb3f // sdot v31.4s, v25.16b, v24.4b[2]\n"
".word 0x4f98eb5e // sdot v30.4s, v26.16b, v24.4b[2]\n"
".word 0x4f98e35d // sdot v29.4s, v26.16b, v24.4b[0]\n"
".word 0x4f98e37c // sdot v28.4s, v27.16b, v24.4b[0]\n"
".word 0x4f97e35f // sdot v31.4s, v26.16b, v23.4b[0]\n"
".word 0x4f97e37e // sdot v30.4s, v27.16b, v23.4b[0]\n"
".word 0x4f98eb7d // sdot v29.4s, v27.16b, v24.4b[2]\n"
"sqrdmulh v28.4s, v28.4s, v21.4s\n"
".word 0x4f97eb7f // sdot v31.4s, v27.16b, v23.4b[2]\n"
"sqrdmulh v30.4s, v30.4s, v21.4s\n"
"sqrdmulh v29.4s, v29.4s, v21.4s\n"
"sqrshl v28.4s, v28.4s, v20.4s\n"
"sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqrshl v30.4s, v30.4s, v20.4s\n"
"sqrshl v29.4s, v29.4s, v20.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqrshl v31.4s, v31.4s, v20.4s\n"
"sqxtn v30.4h, v30.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqxtn v28.8b, v28.8h\n"
"sqxtn2 v28.16b, v29.8h\n"
"smax v28.16b, v28.16b, v1.16b\n"
"add x12, x25, x16\n"
"smin v28.16b, v28.16b, v2.16b\n"
"add %[output_block_data], x5, x16\n"
"str s28, [x23, x16]\n"
"st1 { v28.s }[1], [x12]\n"
"add x12, x17, x16\n"
"mov v29.16b, v19.16b\n"
"ushr v10.2d, v22.2d, #16\n"
"mov v30.16b, v19.16b\n"
"mov v31.16b, v19.16b\n"
"st1 { v28.s }[2], [%[output_block_data]]\n"
"st1 { v28.s }[3], [x12]\n"
"ushr v28.2d, v24.2d, #16\n"
".word 0x4f8ae25d // sdot v29.4s, v18.16b, v10.4b[0]\n"
"mov v8.16b, v19.16b\n"
".word 0x4f9ce25f // sdot v31.4s, v18.16b, v28.4b[0]\n"
".word 0x4f8aea5e // sdot v30.4s, v18.16b, v10.4b[2]\n"
".word 0x4f8aea3d // sdot v29.4s, v17.16b, v10.4b[2]\n"
"ushr v9.2d, v23.2d, #16\n"
".word 0x4f9cea48 // sdot v8.4s, v18.16b, v28.4b[2]\n"
".word 0x4f9cea3f // sdot v31.4s, v17.16b, v28.4b[2]\n"
".word 0x4f9ce23e // sdot v30.4s, v17.16b, v28.4b[0]\n"
".word 0x4f9ce21d // sdot v29.4s, v16.16b, v28.4b[0]\n"
".word 0x4f89e228 // sdot v8.4s, v17.16b, v9.4b[0]\n"
".word 0x4f89e21f // sdot v31.4s, v16.16b, v9.4b[0]\n"
".word 0x4f9cea1e // sdot v30.4s, v16.16b, v28.4b[2]\n"
"sqrdmulh v29.4s, v29.4s, v21.4s\n"
".word 0x4f89ea08 // sdot v8.4s, v16.16b, v9.4b[2]\n"
"sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqrdmulh v30.4s, v30.4s, v21.4s\n"
"sqrshl v29.4s, v29.4s, v20.4s\n"
"sqrdmulh v8.4s, v8.4s, v21.4s\n"
"sqrshl v31.4s, v31.4s, v20.4s\n"
"sqrshl v30.4s, v30.4s, v20.4s\n"
"sqxtn v29.4h, v29.4s\n"
"sqrshl v8.4s, v8.4s, v20.4s\n"
"sqxtn v31.4h, v31.4s\n"
"sqxtn2 v29.8h, v30.4s\n"
"sqxtn2 v31.8h, v8.4s\n"
"sqadd v29.8h, v29.8h, v0.8h\n"
"sqadd v30.8h, v31.8h, v0.8h\n"
"sqxtn v29.8b, v29.8h\n"
"sqxtn2 v29.16b, v30.8h\n"
"smax v29.16b, v29.16b, v1.16b\n"
"add %[output_block_data], x24, x16\n"
"smin v29.16b, v29.16b, v2.16b\n"
"mov v30.16b, v19.16b\n"
"add x12, x28, x16\n"
"str s29, [%[scratch_block_data], x16]\n"
"st1 { v29.s }[1], [%[output_block_data]]\n"
"add %[output_block_data], x26, x16\n"
"mov v31.16b, v19.16b\n"
"mov v8.16b, v19.16b\n"
".word 0x4f8ae33e // sdot v30.4s, v25.16b, v10.4b[0]\n"
"st1 { v29.s }[2], [x12]\n"
"st1 { v29.s }[3], [%[output_block_data]]\n"
"mov v29.16b, v19.16b\n"
".word 0x4f9ce328 // sdot v8.4s, v25.16b, v28.4b[0]\n"
".word 0x4f8aeb3f // sdot v31.4s, v25.16b, v10.4b[2]\n"
".word 0x4f8aeb5e // sdot v30.4s, v26.16b, v10.4b[2]\n"
".word 0x4f9ceb3d // sdot v29.4s, v25.16b, v28.4b[2]\n"
".word 0x4f9ceb48 // sdot v8.4s, v26.16b, v28.4b[2]\n"
".word 0x4f9ce35f // sdot v31.4s, v26.16b, v28.4b[0]\n"
".word 0x4f9ce37e // sdot v30.4s, v27.16b, v28.4b[0]\n"
".word 0x4f89e35d // sdot v29.4s, v26.16b, v9.4b[0]\n"
".word 0x4f89e368 // sdot v8.4s, v27.16b, v9.4b[0]\n"
".word 0x4f9ceb7f // sdot v31.4s, v27.16b, v28.4b[2]\n"
"sqrdmulh v30.4s, v30.4s, v21.4s\n"
".word 0x4f89eb7d // sdot v29.4s, v27.16b, v9.4b[2]\n"
"sqrdmulh v28.4s, v8.4s, v21.4s\n"
"sqrdmulh v31.4s, v31.4s, v21.4s\n"
"sqrshl v30.4s, v30.4s, v20.4s\n"
"sqrdmulh v29.4s, v29.4s, v21.4s\n"
"sqrshl v28.4s, v28.4s, v20.4s\n"
"sqrshl v31.4s, v31.4s, v20.4s\n"
"sqxtn v30.4h, v30.4s\n"
"ldr x12, [sp, #336]\n" // 8-byte Folded Reload
"sqrshl v29.4s, v29.4s, v20.4s\n"
"sqxtn v28.4h, v28.4s\n"
"sqxtn2 v30.8h, v31.4s\n"
"sqxtn2 v28.8h, v29.4s\n"
"sqadd v29.8h, v30.8h, v0.8h\n"
"sqadd v28.8h, v28.8h, v0.8h\n"
"sqxtn v29.8b, v29.8h\n"
"sqxtn2 v29.16b, v28.8h\n"
"smax v28.16b, v29.16b, v1.16b\n"
"add x12, x12, x16\n"
"smin v8.16b, v28.16b, v2.16b\n"
"mov v28.16b, v19.16b\n"
"mov v29.16b, v19.16b\n"
"mov v30.16b, v19.16b\n"
"mov v31.16b, v19.16b\n"
"ushr v24.2d, v24.2d, #32\n"
"add %[output_block_data], x9, x16\n"
"str s8, [x27, x16]\n"
"st1 { v8.s }[1], [x12]\n"
"add x12, x8, x16\n"
"subs w13, w13, #1\n" // =1
"ushr v22.2d, v22.2d, #32\n"
"ushr v23.2d, v23.2d, #32\n"
".word 0x4f98e21c // sdot v28.4s, v16.16b, v24.4b[0]\n"
".word 0x4f98e23d // sdot v29.4s, v17.16b, v24.4b[0]\n"
".word 0x4f98e25e // sdot v30.4s, v18.16b, v24.4b[0]\n"
".word 0x4f98ea5f // sdot v31.4s, v18.16b, v24.4b[2]\n"
"add x16, x16, x10\n"
"st1 { v8.s }[2], [%[output_block_data]]\n"
"st1 { v8.s }[3], [x12]\n"
"b.ne " DC_KERNEL_MULT_10 "b\n"
// %bb.11: // in Loop: Header=BB107_8 Depth=2
"ldr w25, [sp, #212]\n" // 4-byte Folded Reload
"add x13, x21, x16\n"
"mov %[output_block_data], x21\n"
"ldp x21, %[scratch_block_data], [sp, #256]\n" // 16-byte Folded Reload
"ldr x6, [sp, #232]\n" // 8-byte Folded Reload
"mov x27, x28\n"
"mov x28, x24\n"
"ldr x24, [sp, #192]\n" // 8-byte Folded Reload
"ldr x17, [sp, #176]\n" // 8-byte Folded Reload
"ldp x15, %[bias_data], [sp, #280]\n" // 16-byte Folded Reload
"ldr %[filter_workspace], [sp, #272]\n" // 8-byte Folded Reload
"mov w12, w25\n"
"mov x5, x8\n"
"mov x8, x9\n"
"mov w10, #4\n"
"ldr w16, [sp, #324]\n" // 4-byte Folded Reload
"cmp w12, w16\n"
"b.ge " DC_KERNEL_MULT_7 "b\n"
DC_KERNEL_MULT_12 ":\n" // in Loop: Header=BB107_8 Depth=2
"ldr w12, [sp, #320]\n" // 4-byte Folded Reload
"cmp w12, #1\n" // =1
"b.lt " DC_KERNEL_MULT_7 "b\n"
// %bb.13: // in Loop: Header=BB107_8 Depth=2
"add x12, x14, #4\n" // =4
"ldr x16, [sp, #328]\n" // 8-byte Folded Reload
"add x14, x12, x24\n"
"ld1 { v23.s }[1], [x14]\n"
"add x14, x12, x17\n"
"add x16, x12, x16\n"
"ld1 { v24.s }[1], [x16]\n"
"ld1 { v23.s }[3], [x14]\n"
"ldp x16, x14, [sp, #296]\n" // 16-byte Folded Reload
"add x16, x12, x16\n"
"ld1 { v24.s }[3], [x16]\n"
"ldr x16, [sp, #64]\n" // 8-byte Folded Reload
"ld1 { v22.s }[1], [x12], x14\n"
"ldr w14, [sp, #320]\n" // 4-byte Folded Reload
"ld1 { v22.s }[3], [x12]\n"
DC_KERNEL_MULT_14 ":\n" // Parent Loop BB107_5 Depth=1
// Parent Loop BB107_8 Depth=2
// => This Inner Loop Header: Depth=3
".word 0x4f96e25c // sdot v28.4s, v18.16b, v22.4b[0]\n"
".word 0x4f96ea5d // sdot v29.4s, v18.16b, v22.4b[2]\n"
".word 0x4f98ea3e // sdot v30.4s, v17.16b, v24.4b[2]\n"
".word 0x4f96ea3c // sdot v28.4s, v17.16b, v22.4b[2]\n"
".word 0x4f97e23f // sdot v31.4s, v17.16b, v23.4b[0]\n"
".word 0x4f98ea1d // sdot v29.4s, v16.16b, v24.4b[2]\n"
".word 0x4f97e21e // sdot v30.4s, v16.16b, v23.4b[0]\n"
"sqrdmulh v25.4s, v28.4s, v21.4s\n"
".word 0x4f97ea1f // sdot v31.4s, v16.16b, v23.4b[2]\n"
"sqrdmulh v26.4s, v29.4s, v21.4s\n"
"sqrdmulh v27.4s, v30.4s, v21.4s\n"
"sqrshl v25.4s, v25.4s, v20.4s\n"
"sqrdmulh v28.4s, v31.4s, v21.4s\n"
"sqrshl v26.4s, v26.4s, v20.4s\n"
"sqrshl v27.4s, v27.4s, v20.4s\n"
"sqxtn v25.4h, v25.4s\n"
"sqrshl v28.4s, v28.4s, v20.4s\n"
"sqxtn v27.4h, v27.4s\n"
"sqxtn2 v25.8h, v26.4s\n"
"sqxtn2 v27.8h, v28.4s\n"
"sqadd v25.8h, v25.8h, v0.8h\n"
"sqadd v26.8h, v27.8h, v0.8h\n"
"sqxtn v25.8b, v25.8h\n"
"sqxtn2 v25.16b, v26.8h\n"
"smax v25.16b, v25.16b, v1.16b\n"
"add x12, x13, x21\n"
"smin v25.16b, v25.16b, v2.16b\n"
"str s25, [x13]\n"
"st1 { v25.s }[1], [x12]\n"
"add x12, x13, x6\n"
"ushr v24.2d, v24.2d, #8\n"
"mov v28.16b, v19.16b\n"
"mov v29.16b, v19.16b\n"
"mov v30.16b, v19.16b\n"
"mov v31.16b, v19.16b\n"
"st1 { v25.s }[2], [x12]\n"
"add x12, x13, x16\n"
"subs w14, w14, #1\n" // =1
"ushr v22.2d, v22.2d, #8\n"
"ushr v23.2d, v23.2d, #8\n"
".word 0x4f98e21c // sdot v28.4s, v16.16b, v24.4b[0]\n"
".word 0x4f98e23d // sdot v29.4s, v17.16b, v24.4b[0]\n"
".word 0x4f98e25e // sdot v30.4s, v18.16b, v24.4b[0]\n"
"add x13, x13, %[function_params]\n"
".word 0x4f98ea5f // sdot v31.4s, v18.16b, v24.4b[2]\n"
"st1 { v25.s }[3], [x12]\n"
"b.ne " DC_KERNEL_MULT_14 "b\n"
"b " DC_KERNEL_MULT_7 "b\n"
DC_KERNEL_MULT_15 ":\n" // in Loop: Header=BB107_8 Depth=2
"ldr x11, [sp, #72]\n" // 8-byte Folded Reload
"ldr x6, [sp, #232]\n" // 8-byte Folded Reload
"mov w12, wzr\n"
"mov x14, %[scratch_block_data]\n"
"add x13, x11, x15, lsl #2\n"
"ldr w16, [sp, #324]\n" // 4-byte Folded Reload
"cmp w12, w16\n"
"b.lt " DC_KERNEL_MULT_12 "b\n"
"b " DC_KERNEL_MULT_7 "b\n"
DC_KERNEL_MULT_16 ":\n" // in Loop: Header=BB107_5 Depth=1
"ldr w16, [sp, #12]\n" // 4-byte Folded Reload
"add x11, %[bias_data], #32\n" // =32
"tbnz w16, #0, " DC_KERNEL_MULT_3 "b\n"
// %bb.17: // in Loop: Header=BB107_5 Depth=1
"ldp x13, x16, [sp, #16]\n" // 16-byte Folded Reload
"mov x12, x14\n"
"lsl w12, w12, #3\n"
"lsl x12, x12, #2\n"
"add x13, x13, x12\n"
"add x12, x16, x12\n"
"ldp q19, q20, [%[bias_data]]\n"
"ldp q21, q22, [x13]\n"
"ldp q23, q24, [x12]\n"
"ldr x15, [sp, #72]\n" // 8-byte Folded Reload
"ldr %[scratch_block_data], [sp, #304]\n" // 8-byte Folded Reload
"mov x21, %[output_block_data]\n"
"mov x14, xzr\n"
"b " DC_KERNEL_MULT_19 "f\n"
DC_KERNEL_MULT_18 ":\n" // in Loop: Header=BB107_19 Depth=2
"ldr x12, [sp, #80]\n" // 8-byte Folded Reload
"add x14, x14, #1\n" // =1
"cmp x14, x12\n"
"ldr x12, [sp, #256]\n" // 8-byte Folded Reload
"add x15, x15, x12\n"
"b.eq " DC_KERNEL_MULT_2 "b\n"
DC_KERNEL_MULT_19 ":\n" // Parent Loop BB107_5 Depth=1
// => This Loop Header: Depth=2
// Child Loop BB107_21 Depth 3
// Child Loop BB107_22 Depth 4
"ldr x12, [sp, #264]\n" // 8-byte Folded Reload
"mov w13, wzr\n"
"madd x6, x14, %[scratch_block_data], x12\n"
"ldr w12, [x6]\n"
"add x16, x6, %[scratch_block_data]\n"
"fmov s25, w12\n"
"mov v25.s[1], w12\n"
"ld1 { v25.s }[2], [x16]\n"
"ldr x16, [sp, #328]\n" // 8-byte Folded Reload
"mov v25.s[3], w12\n"
"add x16, x6, x16\n"
"ld1r { v26.4s }, [x16]\n"
"mov x16, x15\n"
"b " DC_KERNEL_MULT_21 "f\n"
DC_KERNEL_MULT_20 ":\n" // in Loop: Header=BB107_21 Depth=3
"ldr w12, [sp, #324]\n" // 4-byte Folded Reload
"add w13, w13, #1\n" // =1
"cmp w13, w12\n"
"b.eq " DC_KERNEL_MULT_18 "b\n"
DC_KERNEL_MULT_21 ":\n" // Parent Loop BB107_5 Depth=1
// Parent Loop BB107_19 Depth=2
// => This Loop Header: Depth=3
// Child Loop BB107_22 Depth 4
"ldr %[output_block_data], [sp, #328]\n" // 8-byte Folded Reload
"add x6, x6, #4\n" // =4
"mov x12, x6\n"
"ld1 { v25.s }[1], [x12], %[output_block_data]\n"
"ldr w3, [sp, #316]\n" // 4-byte Folded Reload
"ld1 { v26.s }[1], [x12]\n"
"ldr w12, [sp, #320]\n" // 4-byte Folded Reload
"cmp w13, w3\n"
"add %[output_block_data], x6, %[scratch_block_data]\n"
"ld1 { v25.s }[3], [%[output_block_data]]\n"
"csel w12, w12, w10, eq\n"
"cmp w12, #1\n" // =1
"b.lt " DC_KERNEL_MULT_20 "b\n"
DC_KERNEL_MULT_22 ":\n" // Parent Loop BB107_5 Depth=1
// Parent Loop BB107_19 Depth=2
// Parent Loop BB107_21 Depth=3
// => This Inner Loop Header: Depth=4
"mov v27.16b, v19.16b\n"
"mov v28.16b, v20.16b\n"
".word 0x4f99e25b // sdot v27.4s, v18.16b, v25.4b[0]\n"
".word 0x4f99e0bc // sdot v28.4s, v5.16b, v25.4b[0]\n"
".word 0x4f99ea3b // sdot v27.4s, v17.16b, v25.4b[2]\n"
".word 0x4f99e8dc // sdot v28.4s, v6.16b, v25.4b[2]\n"
".word 0x4f9ae21b // sdot v27.4s, v16.16b, v26.4b[0]\n"
".word 0x4f9ae0fc // sdot v28.4s, v7.16b, v26.4b[0]\n"
"sqrdmulh v27.4s, v27.4s, v23.4s\n"
"sqrdmulh v28.4s, v28.4s, v24.4s\n"
"sqrshl v27.4s, v27.4s, v21.4s\n"
"sqrshl v28.4s, v28.4s, v22.4s\n"
"sqxtn v27.4h, v27.4s\n"
"sqxtn2 v27.8h, v28.4s\n"
"sqadd v27.8h, v27.8h, v0.8h\n"
"sqxtn v27.8b, v27.8h\n"
"smax v27.8b, v27.8b, v3.8b\n"
"smin v27.8b, v27.8b, v4.8b\n"
"subs w12, w12, #1\n" // =1
"ushr v25.2d, v25.2d, #8\n"
"ushr v26.2d, v26.2d, #8\n"
"str d27, [x16]\n"
"add x16, x16, %[function_params]\n"
"b.ne " DC_KERNEL_MULT_22 "b\n"
"b " DC_KERNEL_MULT_20 "b\n"
DC_KERNEL_MULT_23 ":\n"
// Compiled intrinsics total stack 448, now 352 for spillage only.
"add sp, sp, #352\n" // =448
:
// Outputs.
[ scratch_block_data ] "+r"(scratch_block_data),
[ filter_workspace ] "+r"(filter_workspace),
[ bias_data ] "+r"(bias_data),
[ output_block_data ] "+r"(output_block_data)
:
// Inputs.
[ function_params ] "r"(function_params)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
"v16", "v17", "v18", "v19", "v20",
"v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
"v31",
// We use these general-purpose registers.
"x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
"x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
"x27", "x28");
#undef DC_KERNEL_MULT_1
#undef DC_KERNEL_MULT_2
#undef DC_KERNEL_MULT_3
#undef DC_KERNEL_MULT_4
#undef DC_KERNEL_MULT_5
#undef DC_KERNEL_MULT_6
#undef DC_KERNEL_MULT_7
#undef DC_KERNEL_MULT_8
#undef DC_KERNEL_MULT_9
#undef DC_KERNEL_MULT_10
#undef DC_KERNEL_MULT_11
#undef DC_KERNEL_MULT_12
#undef DC_KERNEL_MULT_13
#undef DC_KERNEL_MULT_14
#undef DC_KERNEL_MULT_15
#undef DC_KERNEL_MULT_16
#undef DC_KERNEL_MULT_17
#undef DC_KERNEL_MULT_18
#undef DC_KERNEL_MULT_19
#undef DC_KERNEL_MULT_20
#undef DC_KERNEL_MULT_21
#undef DC_KERNEL_MULT_22
#undef DC_KERNEL_MULT_23
} // NOLINT(readability/fn_size) Manually unrolled.
static inline void Run(const int8* scratch_block_data,
const int8* filter_workspace, const int32* bias_data,
int8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
output_block_data, function_params);
}
};
// Depthwise-convolution macro-block kernel: NEON 3x3 dot-product path,
// per-channel int8 quantization, depth multiplication with unit input depth,
// convolution stride 2 (see the template arguments below).  The entire body
// is pre-scheduled AArch64 assembly; SDOT instructions are emitted as raw
// ".word" encodings (each annotated with its mnemonic) so the code assembles
// on toolchains without dot-product support.
template <>
struct KernelMacroBlock<DepthwiseConvImplementation::kUseNeon3x3DotProduct,
                        QuantizationType::kPerChannelInt8,
                        DepthwiseConvDepthMultiplication::kUnitInputDepth,
                        /*stride=*/2> {
  // Core implementation.  Reads packed input from scratch_block_data and
  // packed filters from filter_workspace, applies per-channel requantization
  // (multiplier/shift pairs loaded via function_params), clamps to the
  // quantized activation range, and writes int8 results to
  // output_block_data.  All loop trip counts and strides come from
  // function_params at the DP_OFFSET_* byte offsets.
  static inline void KernelMacroBlockNeon(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, int8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Note that argument registers may be reused after parameter loading.
    // x0 %[scratch_block_data]
    // x1 %[filter_workspace]
    // x2 %[bias_data]
    // x3 %[output_block_data]
    // x4 %[function_params]
// Numeric asm-local label names, wrapped in macros so branch sites below
// read symbolically (the trailing "f"/"b" in branches selects the forward
// or backward instance of the numeric label).
#define DC_KERNEL_MULT_STRIDE_1 "1"
#define DC_KERNEL_MULT_STRIDE_2 "2"
#define DC_KERNEL_MULT_STRIDE_3 "3"
#define DC_KERNEL_MULT_STRIDE_4 "4"
#define DC_KERNEL_MULT_STRIDE_5 "5"
#define DC_KERNEL_MULT_STRIDE_6 "6"
#define DC_KERNEL_MULT_STRIDE_7 "7"
#define DC_KERNEL_MULT_STRIDE_8 "8"
#define DC_KERNEL_MULT_STRIDE_9 "9"
#define DC_KERNEL_MULT_STRIDE_10 "10"
#define DC_KERNEL_MULT_STRIDE_11 "11"
#define DC_KERNEL_MULT_STRIDE_12 "12"
#define DC_KERNEL_MULT_STRIDE_13 "13"
#define DC_KERNEL_MULT_STRIDE_14 "14"
#define DC_KERNEL_MULT_STRIDE_15 "15"
#define DC_KERNEL_MULT_STRIDE_16 "16"
#define DC_KERNEL_MULT_STRIDE_17 "17"
#define DC_KERNEL_MULT_STRIDE_18 "18"
    // NOTE(review): the "// %bb.N" markers below indicate this instruction
    // stream is captured compiler output (basic-block comments preserved);
    // treat it as machine-scheduled — regenerate rather than hand-edit.
    asm volatile(
        // Compiled code used block of 32 for spill out of total stack of 112.
        "sub sp, sp, #32\n"  // =112
        "ldr w8, [%[function_params], #" STR(DP_OFFSET_DEPTH_MICRO_REPEATS) "]\n"
        "cmp w8, #1\n"  // =1
        "b.lt " DC_KERNEL_MULT_STRIDE_18 "f\n"
        // %bb.1:
        "ldr w7, [%[function_params], #" STR(DP_OFFSET_OUTPUT_RESIDUAL_WIDTH) "]\n"
        "ldp w12, w22, [%[function_params], #" STR(DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS) "]\n"
        "ldpsw x10, x11, [%[function_params], #" STR(DP_OFFSET_OUTPUT_HEIGHT_STRIDE) "]\n"
        "ldrsw x17, [%[function_params], #" STR(DP_OFFSET_OUTPUT_DEPTH) "]\n"
        "add x13, %[function_params], #" STR(DP_OFFSET_OUTPUT_OFFSET) "\n"  // =28
        "add x14, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MAX) "\n"  // =44
        "add x6, %[function_params], #" STR(DP_OFFSET_QUANTIZED_ACTIVATION_MIN) "\n"  // =40
        "cmp w7, #2\n"  // =2
        "ldp x15, x16, [%[function_params], #" STR(DP_OFFSET_OUTPUT_MULTPLIPLIER_PER_CHANNEL) "]\n"
        "ldr w4, [%[function_params], #" STR(DP_OFFSET_OUTBOUND_BLOCK_HEIGHT) "]\n"
        // Broadcast quantization constants: v0 = output offset (8x16-bit),
        // v1 = activation min, v2 = activation max (8x8-bit each); used by
        // the sqadd/smax/smin requantization tails below.
        "ld1r { v0.8h }, [x13]\n"
        "ld1r { v1.8b }, [x6]\n"
        "ld1r { v2.8b }, [x14]\n"
        "ccmp w22, w12, #0, lt\n"
        "add x13, x10, x17\n"
        "str x22, [sp]\n"  // 8-byte Folded Spill
        "csel w22, w22, w12, lt\n"
        "lsl x6, x11, #1\n"
        "add x21, x13, #4\n"  // =4
        "bic w13, w22, w22, asr #31\n"
        "mov x9, xzr\n"
        "add x5, %[scratch_block_data], #4\n"  // =4
        "str w7, [sp, #12]\n"  // 4-byte Folded Spill
        "add x7, x17, #4\n"  // =4
        "add x19, x10, #4\n"  // =4
        "add x20, x6, x11\n"
        "lsl x14, x13, #2\n"
        "sub x13, x12, x13\n"
        "stp x13, x14, [sp, #16]\n"  // 16-byte Folded Spill
        "b " DC_KERNEL_MULT_STRIDE_3 "f\n"
        DC_KERNEL_MULT_STRIDE_2 ":\n"  // in Loop: Header=BB108_3 Depth=1
        "add x9, x9, #1\n"  // =1
        "cmp x9, x8\n"
        "b.eq " DC_KERNEL_MULT_STRIDE_18 "f\n"
        DC_KERNEL_MULT_STRIDE_3 ":\n"  // =>This Loop Header: Depth=1
        // Child Loop BB108_16 Depth 2
        // Child Loop BB108_11 Depth 2
        // Child Loop BB108_6 Depth 2
        // Child Loop BB108_13 Depth 2
        "lsl w13, w9, #3\n"
        "lsl x14, x13, #2\n"
        "add x23, x16, x14\n"
        "ldp q19, q20, [x23]\n"
        "ldr w23, [%[scratch_block_data]]\n"
        "add x14, x15, x14\n"
        "ldp q21, q22, [x14]\n"
        "add x14, %[scratch_block_data], x11\n"
        "fmov s23, w23\n"
        "mov v23.s[1], w23\n"
        "ld1 { v23.s }[2], [x14]\n"
        "ldp q3, q4, [%[filter_workspace]]\n"
        "ldp q5, q6, [%[filter_workspace], #32]\n"
        "ldp q7, q16, [%[filter_workspace], #64]\n"
        "ldp q17, q18, [%[bias_data]], #32\n"
        "ldr s24, [%[scratch_block_data], x6]\n"
        "add %[filter_workspace], x1, #96\n"  // =96
        "add x25, %[output_block_data], x13\n"
        "cmp w4, #2\n"  // =2
        "mov v23.s[3], w23\n"
        "b.ne " DC_KERNEL_MULT_STRIDE_8 "f\n"
        // %bb.4: // in Loop: Header=BB108_3 Depth=1
        "dup v24.4s, v24.s[0]\n"
        "add x13, %[scratch_block_data], x20\n"
        "add x14, %[scratch_block_data], x11, lsl #2\n"
        "ld1 { v24.s }[2], [x13]\n"
        "ld1r { v25.4s }, [x14]\n"
        "cmp w22, #1\n"  // =1
        "lsl x26, x11, #2\n"
        "b.lt " DC_KERNEL_MULT_STRIDE_12 "f\n"
        // %bb.5: // in Loop: Header=BB108_3 Depth=1
        "mov x27, xzr\n"
        "mov x28, x22\n"
        DC_KERNEL_MULT_STRIDE_6 ":\n"  // Parent Loop BB108_3 Depth=1
        // => This Inner Loop Header: Depth=2
        "and x13, x27, #0xfffffffc\n"
        "add x13, x5, x13\n"
        "mov x23, x13\n"
        "ld1 { v23.s }[1], [x23], x26\n"
        "add x24, x13, x6\n"
        "ld1 { v24.s }[1], [x24]\n"
        "add x14, x13, x11\n"
        "add x24, x13, x20\n"
        "ld1 { v23.s }[3], [x14]\n"
        "ld1 { v24.s }[3], [x24]\n"
        "mov v27.16b, v17.16b\n"
        "ld1 { v25.s }[1], [x23]\n"
        "mov v28.16b, v17.16b\n"
        ".word 0x4f97e07b  // sdot   v27.4s, v3.16b, v23.4b[0]\n"
        ".word 0x4f98e07c  // sdot   v28.4s, v3.16b, v24.4b[0]\n"
        ".word 0x4f97e8bb  // sdot   v27.4s, v5.16b, v23.4b[2]\n"
        ".word 0x4f98e8bc  // sdot   v28.4s, v5.16b, v24.4b[2]\n"
        ".word 0x4f98e0fb  // sdot   v27.4s, v7.16b, v24.4b[0]\n"
        ".word 0x4f99e0fc  // sdot   v28.4s, v7.16b, v25.4b[0]\n"
        "sqrdmulh v27.4s, v27.4s, v21.4s\n"
        "sqrdmulh v28.4s, v28.4s, v21.4s\n"
        "sqrshl v27.4s, v27.4s, v19.4s\n"
        "sqrshl v28.4s, v28.4s, v19.4s\n"
        "sqxtn v31.4h, v27.4s\n"
        "sqxtn2 v31.8h, v28.4s\n"
        "mov v29.16b, v18.16b\n"
        "sqadd v28.8h, v31.8h, v0.8h\n"
        "mov v30.16b, v18.16b\n"
        "sqxtn v28.8b, v28.8h\n"
        ".word 0x4f97e09d  // sdot   v29.4s, v4.16b, v23.4b[0]\n"
        "add x13, x25, x19\n"
        "smax v28.8b, v28.8b, v1.8b\n"
        ".word 0x4f98e09e  // sdot   v30.4s, v4.16b, v24.4b[0]\n"
        ".word 0x4f97e8dd  // sdot   v29.4s, v6.16b, v23.4b[2]\n"
        "sub x23, x13, #4\n"  // =4
        "smin v28.8b, v28.8b, v2.8b\n"
        ".word 0x4f98e8de  // sdot   v30.4s, v6.16b, v24.4b[2]\n"
        ".word 0x4f98e21d  // sdot   v29.4s, v16.16b, v24.4b[0]\n"
        "str s28, [x25]\n"
        "st1 { v28.s }[1], [x23]\n"
        ".word 0x4f99e21e  // sdot   v30.4s, v16.16b, v25.4b[0]\n"
        "sqrdmulh v28.4s, v29.4s, v22.4s\n"
        "sqrdmulh v29.4s, v30.4s, v22.4s\n"
        "sqrshl v28.4s, v28.4s, v20.4s\n"
        "sqrshl v29.4s, v29.4s, v20.4s\n"
        "sqxtn v28.4h, v28.4s\n"
        "sqxtn2 v28.8h, v29.4s\n"
        "sqadd v28.8h, v28.8h, v0.8h\n"
        "sqxtn v28.8b, v28.8h\n"
        "smax v28.8b, v28.8b, v1.8b\n"
        "smin v28.8b, v28.8b, v2.8b\n"
        "mov v26.16b, v17.16b\n"
        "str s28, [x25, #4]\n"
        "mov v29.16b, v18.16b\n"
        "st1 { v28.s }[1], [x13]\n"
        "ushr v28.2d, v23.2d, #16\n"
        ".word 0x4f9ce07a  // sdot   v26.4s, v3.16b, v28.4b[0]\n"
        ".word 0x4f9ce09d  // sdot   v29.4s, v4.16b, v28.4b[0]\n"
        "mov v27.16b, v17.16b\n"
        "mov v30.16b, v18.16b\n"
        ".word 0x4f9ce8ba  // sdot   v26.4s, v5.16b, v28.4b[2]\n"
        ".word 0x4f9ce8dd  // sdot   v29.4s, v6.16b, v28.4b[2]\n"
        "ushr v28.2d, v24.2d, #16\n"
        ".word 0x4f9ce07b  // sdot   v27.4s, v3.16b, v28.4b[0]\n"
        ".word 0x4f9ce09e  // sdot   v30.4s, v4.16b, v28.4b[0]\n"
        ".word 0x4f9ce8bb  // sdot   v27.4s, v5.16b, v28.4b[2]\n"
        ".word 0x4f9ce8de  // sdot   v30.4s, v6.16b, v28.4b[2]\n"
        ".word 0x4f9ce0fa  // sdot   v26.4s, v7.16b, v28.4b[0]\n"
        ".word 0x4f9ce21d  // sdot   v29.4s, v16.16b, v28.4b[0]\n"
        "ushr v28.2d, v25.2d, #16\n"
        ".word 0x4f9ce0fb  // sdot   v27.4s, v7.16b, v28.4b[0]\n"
        "sqrdmulh v26.4s, v26.4s, v21.4s\n"
        "sqrdmulh v27.4s, v27.4s, v21.4s\n"
        "sqrshl v26.4s, v26.4s, v19.4s\n"
        "sqrshl v27.4s, v27.4s, v19.4s\n"
        "sqxtn v26.4h, v26.4s\n"
        "sqxtn2 v26.8h, v27.4s\n"
        "sqadd v26.8h, v26.8h, v0.8h\n"
        ".word 0x4f9ce21e  // sdot   v30.4s, v16.16b, v28.4b[0]\n"
        "sqrdmulh v28.4s, v29.4s, v22.4s\n"
        "sqxtn v26.8b, v26.8h\n"
        "add x24, x25, x21\n"
        "sqrdmulh v29.4s, v30.4s, v22.4s\n"
        "sqrshl v28.4s, v28.4s, v20.4s\n"
        "smax v26.8b, v26.8b, v1.8b\n"
        "add x23, x25, x7\n"
        "sub x13, x24, #4\n"  // =4
        "sqrshl v29.4s, v29.4s, v20.4s\n"
        "sqxtn v28.4h, v28.4s\n"
        "smin v26.8b, v26.8b, v2.8b\n"
        "stur s26, [x23, #-4]\n"
        "st1 { v26.s }[1], [x13]\n"
        "sqxtn2 v28.8h, v29.4s\n"
        "sqadd v26.8h, v28.8h, v0.8h\n"
        "sqxtn v26.8b, v26.8h\n"
        "add x14, x25, x17\n"
        "smax v26.8b, v26.8b, v1.8b\n"
        "subs x28, x28, #1\n"  // =1
        "ushr v23.2d, v23.2d, #32\n"
        "ushr v24.2d, v24.2d, #32\n"
        "ushr v25.2d, v25.2d, #32\n"
        "add x25, x14, x17\n"
        "smin v26.8b, v26.8b, v2.8b\n"
        "add x27, x27, #4\n"  // =4
        "str s26, [x23]\n"
        "st1 { v26.s }[1], [x24]\n"
        "b.ne " DC_KERNEL_MULT_STRIDE_6 "b\n"
        // %bb.7: // in Loop: Header=BB108_3 Depth=1
        "mov w13, w22\n"
        "cmp w13, w12\n"
        "ldp x13, x27, [sp, #16]\n"  // 16-byte Folded Reload
        "b.lt " DC_KERNEL_MULT_STRIDE_13 "f\n"
        "b " DC_KERNEL_MULT_STRIDE_2 "b\n"
        DC_KERNEL_MULT_STRIDE_8 ":\n"  // in Loop: Header=BB108_3 Depth=1
        "cmp w12, #1\n"  // =1
        "b.lt " DC_KERNEL_MULT_STRIDE_2 "b\n"
        // %bb.9: // in Loop: Header=BB108_3 Depth=1
        "ldr w13, [sp, #12]\n"  // 4-byte Folded Reload
        "dup v24.4s, v24.s[0]\n"
        "cmp w13, #2\n"  // =2
        "b.ne " DC_KERNEL_MULT_STRIDE_14 "f\n"
        // %bb.10: // in Loop: Header=BB108_3 Depth=1
        "mov x26, xzr\n"
        "mov x13, x12\n"
        DC_KERNEL_MULT_STRIDE_11 ":\n"  // Parent Loop BB108_3 Depth=1
        // => This Inner Loop Header: Depth=2
        "and x14, x26, #0xfffffffc\n"
        "add x14, x5, x14\n"
        "mov x23, x14\n"
        "ld1 { v23.s }[1], [x23], x6\n"
        "add x14, x14, x11\n"
        "mov v26.16b, v17.16b\n"
        "mov v27.16b, v18.16b\n"
        "ld1 { v24.s }[1], [x23]\n"
        "ld1 { v23.s }[3], [x14]\n"
        "mov v25.16b, v17.16b\n"
        "add x14, x25, x17\n"
        "ushr v28.2d, v24.2d, #16\n"
        ".word 0x4f9ce0fa  // sdot   v26.4s, v7.16b, v28.4b[0]\n"
        ".word 0x4f9ce21b  // sdot   v27.4s, v16.16b, v28.4b[0]\n"
        "ushr v28.2d, v23.2d, #16\n"
        ".word 0x4f9ce07a  // sdot   v26.4s, v3.16b, v28.4b[0]\n"
        ".word 0x4f9ce09b  // sdot   v27.4s, v4.16b, v28.4b[0]\n"
        ".word 0x4f9ce8ba  // sdot   v26.4s, v5.16b, v28.4b[2]\n"
        ".word 0x4f9ce8db  // sdot   v27.4s, v6.16b, v28.4b[2]\n"
        "mov v28.16b, v18.16b\n"
        ".word 0x4f98e0f9  // sdot   v25.4s, v7.16b, v24.4b[0]\n"
        ".word 0x4f98e21c  // sdot   v28.4s, v16.16b, v24.4b[0]\n"
        ".word 0x4f97e079  // sdot   v25.4s, v3.16b, v23.4b[0]\n"
        ".word 0x4f97e09c  // sdot   v28.4s, v4.16b, v23.4b[0]\n"
        ".word 0x4f97e8b9  // sdot   v25.4s, v5.16b, v23.4b[2]\n"
        ".word 0x4f97e8dc  // sdot   v28.4s, v6.16b, v23.4b[2]\n"
        "sqrdmulh v25.4s, v25.4s, v21.4s\n"
        "sqrdmulh v28.4s, v28.4s, v22.4s\n"
        "sqrshl v25.4s, v25.4s, v19.4s\n"
        "sqrshl v28.4s, v28.4s, v20.4s\n"
        "sqxtn v25.4h, v25.4s\n"
        "sqxtn2 v25.8h, v28.4s\n"
        "sqadd v25.8h, v25.8h, v0.8h\n"
        "sqrdmulh v26.4s, v26.4s, v21.4s\n"
        "sqxtn v25.8b, v25.8h\n"
        "sqrdmulh v27.4s, v27.4s, v22.4s\n"
        "sqrshl v26.4s, v26.4s, v19.4s\n"
        "smax v25.8b, v25.8b, v1.8b\n"
        "sqrshl v27.4s, v27.4s, v20.4s\n"
        "sqxtn v26.4h, v26.4s\n"
        "smin v25.8b, v25.8b, v2.8b\n"
        "str d25, [x25]\n"
        "sqxtn2 v26.8h, v27.4s\n"
        "sqadd v25.8h, v26.8h, v0.8h\n"
        "sqxtn v25.8b, v25.8h\n"
        "smax v25.8b, v25.8b, v1.8b\n"
        "smin v25.8b, v25.8b, v2.8b\n"
        "subs x13, x13, #1\n"  // =1
        "ushr v24.2d, v24.2d, #32\n"
        "ushr v23.2d, v23.2d, #32\n"
        "str d25, [x25, x17]\n"
        "add x25, x14, x17\n"
        "add x26, x26, #4\n"  // =4
        "b.ne " DC_KERNEL_MULT_STRIDE_11 "b\n"
        "b " DC_KERNEL_MULT_STRIDE_2 "b\n"
        DC_KERNEL_MULT_STRIDE_12 ":\n"  // in Loop: Header=BB108_3 Depth=1
        "mov w13, wzr\n"
        "cmp w13, w12\n"
        "ldp x13, x27, [sp, #16]\n"  // 16-byte Folded Reload
        "b.ge " DC_KERNEL_MULT_STRIDE_2 "b\n"
        DC_KERNEL_MULT_STRIDE_13 ":\n"  // Parent Loop BB108_3 Depth=1
        // => This Inner Loop Header: Depth=2
        "and x14, x27, #0xfffffffc\n"
        "add x14, x5, x14\n"
        "mov x24, x14\n"
        "add x23, x14, x6\n"
        "ld1 { v23.s }[1], [x24], x26\n"
        "ld1 { v24.s }[1], [x23]\n"
        "add x23, x14, x11\n"
        "add x14, x14, x20\n"
        "ld1 { v23.s }[3], [x23]\n"
        "ld1 { v24.s }[3], [x14]\n"
        "mov v26.16b, v17.16b\n"
        "ld1 { v25.s }[1], [x24]\n"
        "mov v27.16b, v17.16b\n"
        ".word 0x4f97e07a  // sdot   v26.4s, v3.16b, v23.4b[0]\n"
        ".word 0x4f98e07b  // sdot   v27.4s, v3.16b, v24.4b[0]\n"
        ".word 0x4f97e8ba  // sdot   v26.4s, v5.16b, v23.4b[2]\n"
        ".word 0x4f98e8bb  // sdot   v27.4s, v5.16b, v24.4b[2]\n"
        ".word 0x4f98e0fa  // sdot   v26.4s, v7.16b, v24.4b[0]\n"
        ".word 0x4f99e0fb  // sdot   v27.4s, v7.16b, v25.4b[0]\n"
        "sqrdmulh v26.4s, v26.4s, v21.4s\n"
        "sqrdmulh v27.4s, v27.4s, v21.4s\n"
        "sqrshl v26.4s, v26.4s, v19.4s\n"
        "sqrshl v27.4s, v27.4s, v19.4s\n"
        "sqxtn v26.4h, v26.4s\n"
        "sqxtn2 v26.8h, v27.4s\n"
        "sqadd v26.8h, v26.8h, v0.8h\n"
        "sqxtn v26.8b, v26.8h\n"
        "smax v26.8b, v26.8b, v1.8b\n"
        "add x14, x25, x10\n"
        "mov v27.16b, v18.16b\n"
        "smin v26.8b, v26.8b, v2.8b\n"
        "str s26, [x25]\n"
        "st1 { v26.s }[1], [x14]\n"
        "mov v26.16b, v18.16b\n"
        ".word 0x4f97e09b  // sdot   v27.4s, v4.16b, v23.4b[0]\n"
        ".word 0x4f98e09a  // sdot   v26.4s, v4.16b, v24.4b[0]\n"
        ".word 0x4f97e8db  // sdot   v27.4s, v6.16b, v23.4b[2]\n"
        ".word 0x4f98e8da  // sdot   v26.4s, v6.16b, v24.4b[2]\n"
        ".word 0x4f98e21b  // sdot   v27.4s, v16.16b, v24.4b[0]\n"
        ".word 0x4f99e21a  // sdot   v26.4s, v16.16b, v25.4b[0]\n"
        "sqrdmulh v27.4s, v27.4s, v22.4s\n"
        "sqrdmulh v26.4s, v26.4s, v22.4s\n"
        "sqrshl v27.4s, v27.4s, v20.4s\n"
        "sqrshl v26.4s, v26.4s, v20.4s\n"
        "sqxtn v27.4h, v27.4s\n"
        "sqxtn2 v27.8h, v26.4s\n"
        "sqadd v26.8h, v27.8h, v0.8h\n"
        "sqxtn v26.8b, v26.8h\n"
        "smax v26.8b, v26.8b, v1.8b\n"
        "smin v26.8b, v26.8b, v2.8b\n"
        "subs x13, x13, #1\n"  // =1
        "add x14, x14, #4\n"  // =4
        "ushr v23.2d, v23.2d, #16\n"
        "ushr v24.2d, v24.2d, #16\n"
        "ushr v25.2d, v25.2d, #16\n"
        "str s26, [x25, #4]\n"
        "add x25, x25, x17\n"
        "add x27, x27, #4\n"  // =4
        "st1 { v26.s }[1], [x14]\n"
        "b.ne " DC_KERNEL_MULT_STRIDE_13 "b\n"
        "b " DC_KERNEL_MULT_STRIDE_2 "b\n"
        DC_KERNEL_MULT_STRIDE_14 ":\n"  // in Loop: Header=BB108_3 Depth=1
        "ldr x27, [sp]\n"  // 8-byte Folded Reload
        "mov x13, xzr\n"
        "mov x26, x12\n"
        "b " DC_KERNEL_MULT_STRIDE_16 "f\n"
        DC_KERNEL_MULT_STRIDE_15 ":\n"  // in Loop: Header=BB108_16 Depth=2
        "add x13, x13, #4\n"  // =4
        "subs x26, x26, #1\n"  // =1
        "sub x27, x27, #1\n"  // =1
        "mov v23.16b, v25.16b\n"
        "mov v24.16b, v26.16b\n"
        "b.eq " DC_KERNEL_MULT_STRIDE_2 "b\n"
        DC_KERNEL_MULT_STRIDE_16 ":\n"  // Parent Loop BB108_3 Depth=1
        // => This Inner Loop Header: Depth=2
        "and x14, x13, #0xfffffffc\n"
        "add x14, x5, x14\n"
        "mov x23, x14\n"
        "ld1 { v23.s }[1], [x23], x6\n"
        "add x14, x14, x11\n"
        "mov v25.16b, v17.16b\n"
        "mov v26.16b, v18.16b\n"
        "ld1 { v24.s }[1], [x23]\n"
        "ld1 { v23.s }[3], [x14]\n"
        ".word 0x4f98e0f9  // sdot   v25.4s, v7.16b, v24.4b[0]\n"
        ".word 0x4f98e21a  // sdot   v26.4s, v16.16b, v24.4b[0]\n"
        ".word 0x4f97e079  // sdot   v25.4s, v3.16b, v23.4b[0]\n"
        ".word 0x4f97e09a  // sdot   v26.4s, v4.16b, v23.4b[0]\n"
        ".word 0x4f97e8b9  // sdot   v25.4s, v5.16b, v23.4b[2]\n"
        ".word 0x4f97e8da  // sdot   v26.4s, v6.16b, v23.4b[2]\n"
        "sqrdmulh v25.4s, v25.4s, v21.4s\n"
        "sqrdmulh v26.4s, v26.4s, v22.4s\n"
        "sqrshl v25.4s, v25.4s, v19.4s\n"
        "sqrshl v26.4s, v26.4s, v20.4s\n"
        "sqxtn v27.4h, v25.4s\n"
        "sqxtn2 v27.8h, v26.4s\n"
        "sqadd v26.8h, v27.8h, v0.8h\n"
        "sqxtn v26.8b, v26.8h\n"
        "smax v26.8b, v26.8b, v1.8b\n"
        "smin v26.8b, v26.8b, v2.8b\n"
        "ushr v25.2d, v23.2d, #16\n"
        "str d26, [x25]\n"
        "ushr v26.2d, v24.2d, #16\n"
        "add x25, x25, x17\n"
        "cbz x27, " DC_KERNEL_MULT_STRIDE_15 "b\n"
        // %bb.17: // in Loop: Header=BB108_16 Depth=2
        "mov v27.16b, v17.16b\n"
        "mov v28.16b, v18.16b\n"
        ".word 0x4f9ae0fb  // sdot   v27.4s, v7.16b, v26.4b[0]\n"
        ".word 0x4f9ae21c  // sdot   v28.4s, v16.16b, v26.4b[0]\n"
        ".word 0x4f99e07b  // sdot   v27.4s, v3.16b, v25.4b[0]\n"
        ".word 0x4f99e09c  // sdot   v28.4s, v4.16b, v25.4b[0]\n"
        ".word 0x4f99e8bb  // sdot   v27.4s, v5.16b, v25.4b[2]\n"
        ".word 0x4f99e8dc  // sdot   v28.4s, v6.16b, v25.4b[2]\n"
        "ushr v25.2d, v23.2d, #32\n"
        "sqrdmulh v23.4s, v27.4s, v21.4s\n"
        "ushr v26.2d, v24.2d, #32\n"
        "sqrdmulh v24.4s, v28.4s, v22.4s\n"
        "sqrshl v23.4s, v23.4s, v19.4s\n"
        "sqrshl v24.4s, v24.4s, v20.4s\n"
        "sqxtn v23.4h, v23.4s\n"
        "sqxtn2 v23.8h, v24.4s\n"
        "sqadd v23.8h, v23.8h, v0.8h\n"
        "sqxtn v23.8b, v23.8h\n"
        "smax v23.8b, v23.8b, v1.8b\n"
        "smin v23.8b, v23.8b, v2.8b\n"
        "str d23, [x25]\n"
        "add x25, x25, x17\n"
        "b " DC_KERNEL_MULT_STRIDE_15 "b\n"
        DC_KERNEL_MULT_STRIDE_18 ":\n"
        // Compiled intrinsics total stack 112, now 32 for spillage only.
        "add sp, sp, #32\n"  // =112
        :
        // Outputs.
        [ scratch_block_data ] "+r"(scratch_block_data),
        [ filter_workspace ] "+r"(filter_workspace),
        [ bias_data ] "+r"(bias_data),
        [ output_block_data ] "+r"(output_block_data)
        :
        // Inputs.
        [ function_params ] "r"(function_params)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7",
        "v16", "v17", "v18", "v19", "v20",
        "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30",
        "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x8", "x9", "x10", "x11", "x12", "x13", "x14", "x15",
        "x16", "x17", "x19", "x20", "x21", "x22", "x23", "x24", "x25", "x26",
        "x27", "x28");
#undef DC_KERNEL_MULT_STRIDE_1
#undef DC_KERNEL_MULT_STRIDE_2
#undef DC_KERNEL_MULT_STRIDE_3
#undef DC_KERNEL_MULT_STRIDE_4
#undef DC_KERNEL_MULT_STRIDE_5
#undef DC_KERNEL_MULT_STRIDE_6
#undef DC_KERNEL_MULT_STRIDE_7
#undef DC_KERNEL_MULT_STRIDE_8
#undef DC_KERNEL_MULT_STRIDE_9
#undef DC_KERNEL_MULT_STRIDE_10
#undef DC_KERNEL_MULT_STRIDE_11
#undef DC_KERNEL_MULT_STRIDE_12
#undef DC_KERNEL_MULT_STRIDE_13
#undef DC_KERNEL_MULT_STRIDE_14
#undef DC_KERNEL_MULT_STRIDE_15
#undef DC_KERNEL_MULT_STRIDE_16
#undef DC_KERNEL_MULT_STRIDE_17
#undef DC_KERNEL_MULT_STRIDE_18
  }  // NOLINT(readability/fn_size) Manually unrolled.
  // Uniform entry point across KernelMacroBlock specializations; forwards
  // directly to the NEON implementation above.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockNeon(scratch_block_data, filter_workspace, bias_data,
                         output_block_data, function_params);
  }
};
#undef DP_OFFSET_INPUT_DEPTH
#undef DP_OFFSET_OUTPUT_DEPTH
#undef DP_OFFSET_STRIDE
#undef DP_OFFSET_BIAS_INCREMENT
//
#undef DP_OFFSET_INPUT_OFFSET
#undef DP_OFFSET_OUTPUT_OFFSET
#undef DP_OFFSET_OUTPUT_MULTIPLIER
#undef DP_OFFSET_OUTPUT_SHIFT
#undef DP_OFFSET_QUANTIZED_ACTIVATION_MIN
#undef DP_OFFSET_QUANTIZED_ACTIVATION_MAX
//
#undef DP_OFFSET_PADDING_LEFT
#undef DP_OFFSET_PADDING_RIGHT
#undef DP_OFFSET_PADDING_TOP
#undef DP_OFFSET_PADDING_BOTTOM
//
#undef DP_OFFSET_DEPTH_MICRO_REPEATS
//
#undef DP_OFFSET_WIDTH_MACRO_COUNT
#undef DP_OFFSET_INPUT_WIDTH_OVERALL_MICRO_REPEATS
#undef DP_OFFSET_INPUT_WIDTH_MICRO_REPEATS
#undef DP_OFFSET_RESIDUAL_WIDTH
#undef DP_OFFSET_OUTPUT_WIDTH_OVERALL_MICRO_REPEATS
#undef DP_OFFSET_OUTPUT_WIDTH_MICRO_REPEATS
#undef DP_OFFSET_OUTPUT_RESIDUAL_WIDTH
#undef DP_OFFSET_WORKSPACE_WIDTH_MICRO_REPEATS
//
#undef DP_OFFSET_HEIGHT_MACRO_COUNT
#undef DP_OFFSET_INBOUND_BLOCK_HEIGHT
#undef DP_OFFSET_OUTBOUND_BLOCK_HEIGHT
#undef DP_OFFSET_INPUT_HEIGHT_STRIDE
#undef DP_OFFSET_OUTPUT_HEIGHT_STRIDE
#undef DP_OFFSET_WORKSPACE_HEIGHT_STRIDE
//
#undef DP_OFFSET_FOUR_OVER_STRIDE
#endif // __aarch64__ && !GOOGLE_L4T - Dot product ops hard-coded
// Top-level implementation function for 3x3 depthwise convolution using NEON
// dot-product instructions.
//
// MACRO & MICRO BLOCKS
//
// The task is divided into macro blocks. Data is copied first into a macro
// block in a workspace. This has two purposes: (a) bringing data into
// cache, and (b) permuting data so that it can be used much more easily in
// a dot-product filter.
//
// When there is no depth multiplication:
//
// The permutations required for dot-products are local, within 4 data points
// down the depth and 4 across the width. We want to pull in input data at least
// 8-bytes at a time, down the depth, and so we divide the macro blocks into
// 1x4x8 (height, width, depth) and further divide the micro blocks into
// sub-blocks with shape (1x4x4).
//
// Each macro-block is constructed from micro-blocks that are internally
// rearranged during loading into the macro-block workspace.
//
// In other words, the micro-block shape is
// {1, 1, 4, 8}
// Each macro block is typically shape
// {1, height_block_size, 4 * workspace_width_micro_repeats, 64}
// and workspace_width_micro_repeats is chosen so it fits into the workspace.
//
// However, if depth < 64, we decrease the macro block depth, enabling us to
// increase the macro-block width.
//
// When there is depth multiplication:
//
// We require input-depth = 1 and exploit that instead. Note that output data
// is still full-depth, *as is the filter and bias data after certain
// adjustments*, and so the filter stage in this case still proceeds in terms of
// sub-blocks.
//
// The Magic of these numbers:
// 4 is the number of input elements used in each dot-product.
// 8 is the number of inputs we load at a time into a register.
// 64 is min amount of data to be loaded in a stretch (when possible).
//
// FILTER DATA PREPARATION
//
// Filter data needs to be permuted in a fashion like that of input data, and
// this is done in a preprocessing stage. In addition, this stage extends the
// filter in the direction of width from 3 to 4. The extra filter taps are set
// to zero so that input data does not have to be zeroed before applying
// dot-products.
//
// OVERALL COUNTS: HANDLING TRAILING ITERATION
//
// Often it is necessary to handle the last iteration in a loop differently,
// generally because the final item is shorter. The logic to detect the
// special case can be a bit expensive. We use a scheme in which there are
// two counts, in a pattern like xxx_yyy_repeats and
// xxx_overall_yyy_repeats. The first gives the count of "normal"
// iterations. The loop iterates over the second count, and the induction
// variable is checked to see if it reaches xxx_yyy_repeats. If there is no
// special trailing iteration, xxx_yyy_repeats = xxx_overall_yyy_repeats,
// and the special code is not executed.
//
// Example:
// Suppose that we characterize a size s as
// f(s) -> (block-4-repetitions, remainder, overall_repetitions):
// f(11) -> (2, 3, 3)
// f(12) -> (3, 0, 3)
// f(13) -> (3, 1, 4)
//
// POINTING OUTSIDE OF INPUT ARRAY.
//
// When there is padding, the input data pointer passed to the fill routines
// points outside of the input array and into a kind-of virtual padded
// margin. It turns out that this simplifies the code and removes
// conditional statements. It is hard to explain why without comparing two
// versions of the code. In summary, this way the adjustment into the margin
// can be made unconditionally, and the correction back into the input array
// is done where there is a conditional already.
//
// OVERLAP
//
// Since this is *depthwise* conv, neither the batch nor the depth have overlap.
// The height and width overlap by (filter_size - 1). Thus some data is used
// twice on the borders of macro blocks.
//
template <DepthwiseConvImplementation implementation,
QuantizationType quantization_type>
inline void DepthwiseConvDotProduct3x3Impl(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_data,
const RuntimeShape& filter_shape,
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
filter_data,
const RuntimeShape& bias_shape, const int32* bias_data,
const RuntimeShape& output_shape,
typename QuantizationTypeImpl<quantization_type>::ExternalType* output_data,
int thread_start, int thread_end, int thread_dim) {
// Check kernel restrictions.
constexpr int filter_size = 3;
constexpr int kMaxStride = 2;
constexpr int kMaxPadding = 1;
constexpr int kSymmetricZeroPoint =
QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
TFLITE_DCHECK_EQ(params.weights_offset, -kSymmetricZeroPoint);
TFLITE_DCHECK_LE(params.stride_width, kMaxStride);
TFLITE_DCHECK_EQ(params.stride_height, params.stride_width);
TFLITE_DCHECK_EQ(params.dilation_width_factor, 1);
TFLITE_DCHECK_EQ(params.dilation_height_factor, 1);
TFLITE_DCHECK_LE(params.padding_values.width, kMaxPadding);
TFLITE_DCHECK_LE(params.padding_values.height, kMaxPadding);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
// Key kernel parameters (along with padding handled later).
const int stride = params.stride_width;
const int depth_multiplier = params.depth_multiplier;
const bool has_depth_multiplication = depth_multiplier > 1;
// Extract task dimensions.
const int input_depth = input_shape.Dims(3);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
TFLITE_DCHECK(!has_depth_multiplication || input_depth == 1);
TFLITE_DCHECK(has_depth_multiplication || input_depth == output_depth);
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
TFLITE_DCHECK_EQ(input_depth * depth_multiplier, output_depth);
TFLITE_DCHECK_EQ(MatchingDim(filter_shape, 1, filter_shape, 2), filter_size);
TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
// Return now if nothing to do.
if (output_width == 0 || output_height == 0) {
return;
}
// Kernel parameter structure: set basic fields.
//
// In asm it is easier to pass a structure than more than, say, 8 parameters.
DepthwiseConvDotProdParams function_params;
function_params.input_depth = input_depth;
function_params.output_depth = output_depth;
function_params.input_offset = params.input_offset;
function_params.output_offset = params.output_offset;
function_params.output_multiplier = params.output_multiplier;
function_params.output_shift = params.output_shift;
function_params.quantized_activation_min = params.quantized_activation_min;
function_params.quantized_activation_max = params.quantized_activation_max;
function_params.stride = stride;
// Handle inbound bias data.
//
// Note that this data is adjusted in a per-depth process before the main
// filters. The adjustment accounts for a non-symmetric input offset.
//
// Kernel subroutines need to be able to operate consistently on an bias
// array. Where there is no bias, we provide one filled with zeros.
constexpr int kMinBiasLoad = 8;
int32 zero_bias_data[kMinBiasLoad];
int32 bias_increment;
if (bias_data) {
bias_increment = 4;
} else {
memset(zero_bias_data, 0, sizeof(zero_bias_data));
bias_data = &zero_bias_data[0];
bias_increment = 0;
}
function_params.bias_increment = bias_increment;
TFLITE_DCHECK_LE(2 * function_params.bias_increment, kMinBiasLoad);
// Process multithreading.
int batch_start = 0;
int batch_end = batches;
int row_start = 0;
int row_end = output_height;
switch (thread_dim) {
case 0:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, batches);
batch_start = thread_start;
batch_end = thread_end;
break;
case 1:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, output_height);
row_start = thread_start;
row_end = thread_end;
break;
}
const int row_count = row_end - row_start;
// Process padding.
//
// Whether "correct" or not, this matches ComputeConvSizes. When there is
// stride > 1 there can be padding on the bottom or top, and therefore
// we need to consider padding. This is true even if one or other of the
// padding_values is 0.
const int padded_width = (output_width - 1) * stride + filter_size;
int full_padding_top;
{
const int padding_left = params.padding_values.width;
// Right padding would be -1 if discarding input because of stride.
const int padding_right =
std::max(padded_width - input_width - padding_left, 0);
int padding_top = params.padding_values.height;
const int padded_height = (output_height - 1) * stride + filter_size;
int padding_bottom =
std::max(padded_height - input_height - padding_top, 0);
TFLITE_DCHECK_LE(padding_left, padding_right);
TFLITE_DCHECK_LE(padding_top, padding_bottom);
full_padding_top = padding_top;
if (row_start != 0) {
padding_top = 0;
}
if (row_end != output_height) {
padding_bottom = 0;
}
function_params.padding_left = padding_left;
function_params.padding_right = padding_right;
function_params.padding_top = padding_top;
function_params.padding_bottom = padding_bottom;
}
// When stride == 1 left or top padding may only be non-zero.
// This is when padding is specified but not needed on a trailing dimension.
// When stride == 2 right or bottom padding may only be non-zero.
// This is a result of the details of the padding calculations.
const bool padding_required =
function_params.padding_left > 0 || function_params.padding_top > 0 ||
function_params.padding_right > 0 || function_params.padding_bottom > 0;
// Choose parameter-specific kernel subroutines.
//
// The main part of the kernel has two stages. First, a temporary workspace is
// filled with padded and permuted data. Second, the filter is applied to the
// workspace data to generate output.
//
// The workspace fill stage handles padding so that the filter stage does not
// need to account for it. The workspace fill stage does not need to
// understand striding, and implicitly handles striding through the parameters
// that it is given.
using pack_macro_block_func_t = decltype(
&PackMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
0>::Run);
using kernel_macro_block_func_t = decltype(
&KernelMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
1>::Run);
pack_macro_block_func_t pack_macro_block_func;
kernel_macro_block_func_t kernel_macro_block_func;
{
if (has_depth_multiplication) {
if (padding_required) {
pack_macro_block_func =
PackMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*max_padding=*/1>::Run;
} else {
pack_macro_block_func =
PackMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*max_padding=*/0>::Run;
}
if (stride == 1) {
kernel_macro_block_func =
KernelMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/1>::Run;
} else {
kernel_macro_block_func =
KernelMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/2>::Run;
}
} else {
if (padding_required) {
pack_macro_block_func =
PackMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*max_padding=*/1>::Run;
} else {
pack_macro_block_func =
PackMacroBlock<implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*max_padding=*/0>::Run;
}
if (stride == 1) {
kernel_macro_block_func = KernelMacroBlock<
implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*stride=*/1>::Run;
} else {
kernel_macro_block_func = KernelMacroBlock<
implementation, quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*stride=*/2>::Run;
}
}
}
// Stride-only variables.
//
const int row_count_per_macro = stride == 1 ? 4 : 2;
// row_count_per_macro * stride:
constexpr int input_height_per_macro = 4;
// Number of rows per micro block (= rows per macro block) is
// (row_count_per_macro - 1) * stride + 1 + (filter_size - 1)
const int height_block_size = stride == 1 ? 3 + filter_size : 2 + filter_size;
const int input_height_overlap = filter_size - stride;
// stride == 1 ? 4 : 2:
function_params.four_over_stride = row_count_per_macro;
TFLITE_DCHECK_EQ(stride * function_params.four_over_stride, 4);
TFLITE_DCHECK_EQ(height_block_size,
input_height_per_macro + input_height_overlap);
// Create workspaces.
//
// Filter workspace is for shuffle: only first depth/8 is used.
// indexed as [depth/8][sub-block][height][depth][width].
TFLITE_DCHECK_EQ(kDepthwiseConvAdjustedBiasLimit % 8, 0);
int8 macroblock_workspace[kDepthwiseConvScratchWorkspaceSize];
int32 adjusted_bias_data[kDepthwiseConvAdjustedBiasLimit];
int8 filter_workspace[kDepthwiseConvAdjustedBiasLimit >> 3][3][2][4][4];
// Output depth characterization.
//
const int depth_macro_count = output_depth / 64;
const int depth_overall_macro_count = (output_depth + 63) / 64;
// Number of micro blocks down the depth in a final incomplete macro block.
const int depth_trailing_micro_repeats = output_depth / 8 % 8;
// The output_depth may not have a remainder: it must be a multiple of 8.
TFLITE_DCHECK_EQ(output_depth,
64 * depth_macro_count + 8 * depth_trailing_micro_repeats);
// Characterize the first macro block depth, the largest.
//
// We base treatment of the width on the trailing macro block if there are
// no full blocks, in order to do more work together (that is, increase
// workspace_width_micro_repeats when largest_macro_depth < 64).
const int largest_macro_depth =
has_depth_multiplication
? 1
: (depth_macro_count > 0 ? 64 : 8 * depth_trailing_micro_repeats);
// Characterize width, consumption of input and generation of output.
//
// In the case of depth multiplication, we ensure that some of the workspace
// at the end remains unused. This enables the filter routines to load the
// "next" data, of at least 16 bytes, even when at the end of the workspace.
// It is relatively expensive to detect the end micro block. It is also very
// difficult to test for (to trigger) erroneous reads (past end of array) in
// the depth multiplication case.
int workspace_width_micro_repeats =
(has_depth_multiplication
? kDepthwiseConvScratchWorkspaceSize - kWorkspaceExtension
: kDepthwiseConvScratchWorkspaceSize) /
(4 * largest_macro_depth * height_block_size);
// When there is no depth multiplication, the workspace depth is a multiple of
// 8, which ensures that workspace rows are 16-byte aligned. (Actually 32,
// because of the micro width of 4.) This is not necessarily the case under
// depth multiplication, so we adjust now to impose this restriction.
if (has_depth_multiplication) {
workspace_width_micro_repeats = (workspace_width_micro_repeats / 4) * 4;
}
TFLITE_DCHECK_EQ((workspace_width_micro_repeats * largest_macro_depth) % 4,
0);
// Discount 1 of the micro-block repeats in each macro block to account for
// overlap.
const int consumed_width_per_macro_block =
4 * (workspace_width_micro_repeats - 1);
const int output_width_per_macro_block =
function_params.four_over_stride * (workspace_width_micro_repeats - 1);
TFLITE_DCHECK_GT(workspace_width_micro_repeats, 1);
TFLITE_DCHECK_EQ(output_width_per_macro_block * stride,
consumed_width_per_macro_block);
// Width repetitions and residuals.
//
// Use of the workspace is characterized primarily in terms of *padded input*.
// Striding only matters in a few places.
//
// Simplifications: We require that there always be at least one full
// micro-block across the width. Since the maximum padding is 1, the trailing
// padding cannot span two micro blocks.
const int residual_micro_width = padded_width % 4;
// We base the count of macro blocks on the amount of padded input data each
// one consumes.
int width_overall_macro_count = (padded_width - residual_micro_width +
consumed_width_per_macro_block - 1) /
consumed_width_per_macro_block;
// Recall that we left a micro block at the end of each macro block for use as
// overlap. There is a special case in which we can use one fewer macro
// blocks, with the last one consuming extra input. (But not if the
// calculation thinks that we can use zero blocks.)
if (padded_width <=
((width_overall_macro_count - 1) * consumed_width_per_macro_block + 4)) {
width_overall_macro_count -= 1;
}
width_overall_macro_count = std::max(width_overall_macro_count, 1);
// We always have to treat the final macro block along width as trailing,
// because even if it is full in terms of padded input, it will be incomplete
// in terms of output.
const int width_macro_count = width_overall_macro_count - 1;
// Micro blocks are traversed in terms of input in fill routines.
const int width_trailing_micro_repeats =
(padded_width - consumed_width_per_macro_block * width_macro_count) / 4;
const int width_overall_trailing_micro_repeats =
(padded_width - consumed_width_per_macro_block * width_macro_count + 3) /
4;
// Micro blocks are traversed in terms of output in filtering routines.
const int residual_output_micro_width =
(output_width - 1) % function_params.four_over_stride + 1;
const int output_width_trailing_micro_repeats =
residual_micro_width > (filter_size - 1)
? width_trailing_micro_repeats
: width_trailing_micro_repeats - 1;
// Check results.
TFLITE_DCHECK_GT(width_overall_trailing_micro_repeats, 0);
TFLITE_DCHECK_EQ(padded_width,
residual_micro_width +
consumed_width_per_macro_block * width_macro_count +
4 * width_trailing_micro_repeats);
TFLITE_DCHECK_LE(width_overall_macro_count, width_macro_count + 1);
TFLITE_DCHECK_GE(width_overall_macro_count, width_macro_count);
// Height repetitions and residuals.
//
int height_macro_count;
int residual_row_count;
int height_overall_macro_count;
if (stride == 1) {
TFLITE_DCHECK_EQ(row_count_per_macro, 4);
height_macro_count = row_count / 4;
residual_row_count = row_count % 4;
height_overall_macro_count = (row_count + 3) / 4;
} else {
TFLITE_DCHECK_EQ(row_count_per_macro, 2);
height_macro_count = row_count / 2;
residual_row_count = row_count % 2;
height_overall_macro_count = (row_count + 1) / 2;
}
TFLITE_DCHECK_EQ(
row_count, residual_row_count + row_count_per_macro * height_macro_count);
TFLITE_DCHECK_LE(height_overall_macro_count, height_macro_count + 1);
TFLITE_DCHECK_GE(height_overall_macro_count, height_macro_count);
// Data strides.
//
const int input_height_stride = input_width * input_depth;
const int output_height_stride = output_width * output_depth;
const int input_batch_stride = input_height_stride * input_height;
const int output_batch_stride = output_height_stride * output_height;
const int input_depth_macro_stride = has_depth_multiplication ? 0 : 64;
const int input_width_macro_stride =
input_depth * consumed_width_per_macro_block;
const int output_width_macro_stride =
output_depth * output_width_per_macro_block;
// Store parameters that do not vary across macro blocks.
//
function_params.workspace_width_micro_repeats = workspace_width_micro_repeats;
function_params.height_macro_count = height_overall_macro_count;
function_params.width_macro_count = width_overall_macro_count;
function_params.input_height_stride = input_height_stride;
function_params.output_height_stride = output_height_stride;
function_params.residual_width = residual_micro_width;
// Prefetch workspace for write, along with any necessary dummy writes.
const int max_workspace_height_stride =
16 * ((workspace_width_micro_repeats + 3) >> 2) * largest_macro_depth;
const int workspace_fill_size = std::min(
kDepthwiseConvScratchWorkspaceSize,
height_block_size * max_workspace_height_stride + kWorkspaceExtension);
WorkspacePrefetchWrite<implementation>::Run(
params.weights_offset, workspace_fill_size, macroblock_workspace);
// Main process.
//
// Most kernels are nested batch-height-width-depth. Here we proceed over
// macro blocks batch-width-depth-height.
//
// Example of handling of trailing iteration: when there is trailing depth,
// depth_overall_macro_count = depth_macro_count + 1, so we can adjust the
// dimensions for trailing macro blocks by looking for
// j_depth == depth_macro_count.
for (int b = batch_start; b < batch_end; ++b) {
for (int k_width = 0; k_width < width_overall_macro_count; ++k_width) {
// Figure out the work to be done for this macro block. If it trails in
// any dimension, the work in that dimension is adjusted.
// The work to be done across widths has 3 cases:
// (a) A full macro block,
// (b) Partial terminal macro block, with input and output ending in
// same micro block, and
// (c) Partial terminal macro block, with output corresponding to one
// fewer micro blocks, because filter extends across micro-block
// boundary.
if (k_width != width_macro_count) {
function_params.output_residual_width = 0;
function_params.input_width_micro_repeats =
workspace_width_micro_repeats;
function_params.input_width_overall_micro_repeats =
workspace_width_micro_repeats;
function_params.output_width_micro_repeats =
workspace_width_micro_repeats - 1;
} else {
function_params.output_residual_width = residual_output_micro_width;
function_params.input_width_micro_repeats =
width_trailing_micro_repeats;
function_params.input_width_overall_micro_repeats =
width_overall_trailing_micro_repeats;
function_params.output_width_micro_repeats =
output_width_trailing_micro_repeats;
}
function_params.output_width_overall_micro_repeats =
function_params.output_residual_width == 0
? function_params.output_width_micro_repeats
: function_params.output_width_micro_repeats + 1;
for (int j_depth = 0; j_depth < depth_overall_macro_count; ++j_depth) {
if (quantization_type == QuantizationType::kPerChannelInt8) {
// Each macro block handles depth of 64 (8 micro). The kernel
// functions receive pointers to quantization data for the block being
// processed.
function_params.output_multiplier_per_channel =
params.output_multiplier_per_channel + 64 * j_depth;
function_params.output_shift_per_channel =
params.output_shift_per_channel + 64 * j_depth;
}
// Process filter and bias data.
//
function_params.depth_micro_repeats =
j_depth == depth_macro_count ? depth_trailing_micro_repeats : 8;
ProcessPerDepth<implementation, quantization_type>::Run(
filter_data + 64 * j_depth,
bias_data + 8 * 2 * bias_increment * j_depth,
filter_workspace[0][0][0][0], adjusted_bias_data, &function_params);
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_data_block = input_data + b * input_batch_stride +
j_depth * input_depth_macro_stride +
k_width * input_width_macro_stride -
function_params.padding_left * input_depth +
row_start * stride * input_height_stride -
full_padding_top * input_height_stride;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_block =
output_data + b * output_batch_stride +
row_start * output_height_stride + j_depth * 64 +
k_width * output_width_macro_stride;
// Under depth multiplication the workspace_height_stride does not have
// to depend on input_width_overall_micro_repeats, but this improves the
// compactness of workspace use.
const int workspace_height_stride =
has_depth_multiplication
? 16 * ((function_params.input_width_overall_micro_repeats +
3) >>
2)
: 4 * function_params.input_width_overall_micro_repeats * 8 *
function_params.depth_micro_repeats;
TFLITE_DCHECK_EQ(workspace_height_stride % 16, 0);
function_params.workspace_height_stride = workspace_height_stride;
// For the first macro block for output rows we fill in the first few
// rows. After this we will copy them (see below in loop.)
function_params.inbound_block_height = input_height_overlap;
pack_macro_block_func(-1, k_width, input_data_block,
macroblock_workspace, &function_params);
input_data_block += input_height_stride * input_height_overlap;
for (int i_height = 0; i_height < height_overall_macro_count;
++i_height) {
if (i_height != height_macro_count) {
function_params.inbound_block_height = input_height_per_macro;
function_params.outbound_block_height = row_count_per_macro;
} else {
function_params.inbound_block_height = residual_row_count * stride;
function_params.outbound_block_height = residual_row_count;
}
TFLITE_DCHECK_LT(i_height * row_count_per_macro, row_count);
TFLITE_DCHECK_LT(i_height * input_height_per_macro, input_height);
TFLITE_DCHECK_LT(k_width * output_width_per_macro_block,
output_width);
TFLITE_DCHECK_LT(k_width * consumed_width_per_macro_block,
input_width);
// Macro blocks overlap by input_height_overlap rows, so we copy
// those instead of filling in afresh. The first macro block across
// output rows was filled in outside of the loop (above).
if (i_height > 0) {
memcpy(macroblock_workspace,
macroblock_workspace +
input_height_per_macro * workspace_height_stride,
input_height_overlap * workspace_height_stride);
}
pack_macro_block_func(
i_height, k_width, input_data_block,
macroblock_workspace +
input_height_overlap * workspace_height_stride,
&function_params);
kernel_macro_block_func(
macroblock_workspace, filter_workspace[0][0][0][0],
adjusted_bias_data, output_data_block, &function_params);
input_data_block += input_height_stride * input_height_per_macro;
output_data_block += output_height_stride * row_count_per_macro;
}
}
}
}
}
template <DepthwiseConvImplementation implementation>
inline void DepthwiseConvDotProduct3x3(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, int thread_start, int thread_end, int thread_dim) {
DepthwiseConvDotProduct3x3Impl<
implementation, depthwise_conv::QuantizationType::kNonPerChannelUint8>(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data, thread_start, thread_end,
thread_dim);
}
template <DepthwiseConvImplementation implementation>
inline void DepthwiseConvDotProduct3x3PerChannel(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
int thread_start, int thread_end, int thread_dim) {
DepthwiseConvDotProduct3x3Impl<
implementation, depthwise_conv::QuantizationType::kPerChannelInt8>(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data, thread_start, thread_end,
thread_dim);
}
#undef vst1_lane_8x4
#undef vst1q_lane_8x4
#undef vld1q_lane_s8x8
#undef vld1_lane_8x4
#undef vld1q_lane_8x4
#undef vld1q_dup_s8x4
#undef STR
#undef STR_UNEXPANDED
} // namespace depthwise_conv
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_3X3_FILTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h | C++ | apache-2.0 | 585,008 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_TRANSITIONAL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_TRANSITIONAL_H_
// This file provides kernel implementations that are not used in shipped
// inference code, but rather (a) show how model C++ code is designed and then
// transformed into asm code, and (b) aid with maintenance and later development
// of variations. Many projects (even including, say, the classic NAG libraries)
// develop highly optimized code, but do not maintain intermediate versions.
// Often the result is incomprehensible final-version code.
#include <algorithm>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
#ifdef USE_NEON
inline void util_vst1_x8(uint8* data_addr, int8x8_t reg) {
return vst1_u8(data_addr, vreinterpret_u8_s8(reg));
}
inline void util_vst1_x8(int8* data_addr, int8x8_t reg) {
return vst1_s8(data_addr, reg);
}
// Lane operations are for clarity and convenience. We want to load and store
// 4 8-bit lanes together. So these are treated much like 32-bit loads and
// 32-bit stores. Stores require 32-bit alignment.
// Store 4 bytes from one 32-bit lane of a 64-bit register; dst must be
// 4-byte aligned (checked).
#define vst1_lane_8x4(dst, reg, lane_num)                                  \
  TFLITE_DCHECK_EQ(reinterpret_cast<std::uintptr_t>(dst) % 4, 0);          \
  vst1_lane_u32(reinterpret_cast<uint32_t*>(dst), reg, lane_num)
// Store 4 bytes from one 32-bit lane of a 128-bit register; dst must be
// 4-byte aligned (checked).
#define vst1q_lane_8x4(dst, reg, lane_num)                                 \
  TFLITE_DCHECK_EQ(reinterpret_cast<std::uintptr_t>(dst) % 4, 0);          \
  vst1q_lane_u32(reinterpret_cast<uint32_t*>(dst), reg, lane_num)
// Important! Most compilation configurations will compile and run without
// reinterpret_cast. Sanitizers may fail silently on lane-loading, with an
// obscure bug or mis-feature probably in unhygienic macro expansion.
// Load 8 bytes into one 64-bit lane of a 128-bit register.
#define vld1q_lane_s8x8(src, reg, lane_num)                                \
  vld1q_lane_u64(reinterpret_cast<const uint64_t*>(src), reg, lane_num)
// Load 4 bytes into one 32-bit lane of a 64-bit register.
#define vld1_lane_8x4(src, reg, lane_num)                                  \
  vld1_lane_s32(reinterpret_cast<const int32*>(src), reg, lane_num)
// Load 4 bytes into one 32-bit lane of a 128-bit register.
#define vld1q_lane_8x4(src, reg, lane_num)                                 \
  vld1q_lane_s32(reinterpret_cast<const int32*>(src), reg, lane_num)
// Broadcast 4 bytes across all four 32-bit lanes of a 128-bit register.
#define vld1q_dup_s8x4(src) vld1q_dup_s32(reinterpret_cast<const int32*>(src))
#endif // USE_NEON
// Reference ("C model") implementation of the per-depth pre-processing stage
// of the 3x3 dot-product depthwise convolution: shuffles filter data into the
// bank layout consumed by the kernels and folds the input offset into the
// bias. Plain C++ so that the asm/NEON versions can be checked against it.
template <QuantizationType quantization_type>
struct ProcessPerDepth<DepthwiseConvImplementation::kUseCModel3x3DotProduct,
                       quantization_type> {
  // Filter data is provided as filter_block[3][3][depth/8][2][4]: height 3,
  // width 3, sub-block 0 or 1, depth 4. Filter data is written as
  // filter_bank[3][2][4][4]; height 3, sub-block, depth 4, width 4.
  //
  // Note that this rearrangement is much like that performed on input data when
  // filling the workspace, and optimized versions will be similar.
  static inline void FillFilterBank(int depth, const uint8* filter_block,
                                    int8 filter_bank[3][2][4][4]) {
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    // Load filter data in, 8-bytes down depth / sub-block at a time.
    //
    // loaded_filter has dimensions height 3, width 4, sub-block 0 or 1,
    // depth 4.
    uint8 loaded_filter[3][4][2][4];
    for (int y = 0; y < 3; ++y) {
      for (int x = 0; x < 3; ++x) {
        memcpy(loaded_filter[y][x][0], &filter_block[3 * y * depth + x * depth],
               8);
      }
      // Pad the filter with symmetric representation of 0, so that the values
      // become 0 when the zero-point is added below. Thus these filter taps are
      // effectively disregarded in later filtering.
      memset(loaded_filter[y][3][0], kSymmetricZeroPoint, 8);
    }
    // Transpose into [height][sub-block][depth][width] while subtracting the
    // zero point, producing a symmetric int8 representation of the filter.
    for (int y = 0; y < 3; ++y) {
      for (int z = 0; z < 4; ++z) {
        for (int x = 0; x < 4; ++x) {
          filter_bank[y][0][z][x] =
              loaded_filter[y][x][0][z] - kSymmetricZeroPoint;
          filter_bank[y][1][z][x] =
              loaded_filter[y][x][1][z] - kSymmetricZeroPoint;
        }
      }
    }
  }
  // Adjust the bias (weights) data according to the input offset.
  //
  // The output calculation is
  // out[h][w][d] = bias[d] + sum_ij (in[h+i][w+j][d] + in_offset) *
  //                                 (filter[i][j][d] + filter_offset)
  // (where offsets are expressed as differences from 128).
  //
  // Since we cannot efficiently handle varying offsets / bias across the image,
  // we insist on filter_offset = 0.
  //
  // This function calculates
  // adjusted_bias[d] = bias[d] + sum_ij in_offset * filter[i][j][d]
  // which accounts for input offset. If the bias is constant over the depth,
  // the adjusted bias will vary.
  static inline void AdjustBias(int32 input_offset,
                                const int8 filter_bank[3][2][4][4],
                                const int32* bias_data,
                                int32 adjusted_bias_block[2][4]) {
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    TFLITE_DCHECK_GE(input_offset, -255);
    TFLITE_DCHECK_LE(input_offset, 0);
    // For instance, if input_offset == 128, no adjustment is needed.
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    for (int s = 0; s < 2; ++s) {
      for (int z = 0; z < 4; ++z) {
        adjusted_bias_block[s][z] = bias_data[4 * s + z];
        // Accumulate all 9 filter taps: i walks height (i % 3) and width
        // (i / 3) of the 3x3 kernel in filter-bank layout.
        for (int i = 0; i < 9; ++i) {
          adjusted_bias_block[s][z] +=
              input_offset_difference * filter_bank[i % 3][s][z][i / 3];
        }
      }
    }
  }
  // Entry point: for each depth micro block (8 channels), fill the filter
  // bank, adjust the bias, and append both to the output arrays.
  static void Run(const uint8* filter_data, const int32* bias_data,
                  int8* shuffled_filter_data, int32* adjusted_bias_data,
                  const DepthwiseConvDotProdParams* function_params) {
    // 96 bytes of shuffled filter are emitted per depth micro block.
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    const int depth = function_params->output_depth;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int bias_increment = function_params->bias_increment;
    const int32 input_offset = function_params->input_offset;
    int8 filter_bank[3][2][4][4];
    int32 adjusted_bias_block[2][4];
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      FillFilterBank(depth, filter_data + 8 * j_depth, filter_bank);
      AdjustBias(input_offset, filter_bank,
                 bias_data + 2 * bias_increment * j_depth, adjusted_bias_block);
      memcpy(shuffled_filter_data, filter_bank[0][0][0],
             shuffled_filter_increment);
      shuffled_filter_data += shuffled_filter_increment;
      memcpy(adjusted_bias_data, adjusted_bias_block[0],
             8 * sizeof(adjusted_bias_block[0][0]));
      adjusted_bias_data += 8;
    }
  }
};
// "Unwound" per-depth pre-processing: same contract as the C-model version
// (shuffle 3x3 filter into bank layout, fold input offset into bias), but
// with the loops unrolled into per-row arrays as an intermediate step toward
// the NEON-intrinsics implementation.
template <QuantizationType quantization_type>
struct ProcessPerDepth<DepthwiseConvImplementation::kUseUnwound3x3DotProduct,
                       quantization_type> {
  // filter_data:          raw filter, layout [3][3][depth/8][2][4].
  // bias_data:            biases; advanced by 2 * bias_increment per depth
  //                       micro block.
  // shuffled_filter_data: output; 6 * 16 bytes written per depth micro block.
  // adjusted_bias_data:   output; 8 int32 written per depth micro block.
  static inline void Run(const uint8* filter_data, const int32* bias_data,
                         int8* shuffled_filter_data, int32* adjusted_bias_data,
                         const DepthwiseConvDotProdParams* function_params) {
    const int depth = function_params->output_depth;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int bias_increment = function_params->bias_increment;
    // Simulate NEON-register transposition of subset of filter.
    int8 filter_bank_a_0[4][4];  // Depth 4, width 4.
    int8 filter_bank_a_1[4][4];
    int8 filter_bank_a_2[4][4];
    int8 filter_bank_b_0[4][4];
    int8 filter_bank_b_1[4][4];
    int8 filter_bank_b_2[4][4];
    // Load filter data in, essentially dropping the [depth/8] dimension, which
    // is equivalent to loading just the depth needed for one micro-block.
    //
    // loaded_filter has dimensions height 3, width 4, sub-block 0 or 1,
    // depth 4.
    uint8 loaded_filter_0[4][2][4];
    uint8 loaded_filter_1[4][2][4];
    uint8 loaded_filter_2[4][2][4];
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    const int32 input_offset = function_params->input_offset;
    TFLITE_DCHECK_GE(input_offset, -255);
    TFLITE_DCHECK_LE(input_offset, 0);
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      const uint8* filter_block = filter_data + 8 * j_depth;
      // Filter data is provided as filter_block[3][3][depth/8][2][4].
      // height 3, width 3, micro-blocks, sub-block 0 or 1, depth 4.
      // filter_bank[3][2][4][4]; Sub-block, height 3, depth 4, width 4.
      for (int x = 0; x < 3; ++x) {
        memcpy(loaded_filter_0[x][0], &filter_block[3 * 0 * depth + x * depth],
               8);
        memcpy(loaded_filter_1[x][0], &filter_block[3 * 1 * depth + x * depth],
               8);
        memcpy(loaded_filter_2[x][0], &filter_block[3 * 2 * depth + x * depth],
               8);
      }
      // Pad the filter with the symmetric representation of 0 so the padded
      // taps become 0 after zero-point subtraction and are effectively
      // disregarded.
      memset(loaded_filter_0[3][0], kSymmetricZeroPoint, 8);
      memset(loaded_filter_1[3][0], kSymmetricZeroPoint, 8);
      memset(loaded_filter_2[3][0], kSymmetricZeroPoint, 8);
      // Transpose [width][sub-block][depth] -> per-sub-block [depth][width],
      // subtracting the zero point.
      for (int z = 0; z < 4; ++z) {
        for (int x = 0; x < 4; ++x) {
          filter_bank_a_0[z][x] =
              loaded_filter_0[x][0][z] - kSymmetricZeroPoint;
          filter_bank_b_0[z][x] =
              loaded_filter_0[x][1][z] - kSymmetricZeroPoint;
          filter_bank_a_1[z][x] =
              loaded_filter_1[x][0][z] - kSymmetricZeroPoint;
          filter_bank_b_1[z][x] =
              loaded_filter_1[x][1][z] - kSymmetricZeroPoint;
          filter_bank_a_2[z][x] =
              loaded_filter_2[x][0][z] - kSymmetricZeroPoint;
          filter_bank_b_2[z][x] =
              loaded_filter_2[x][1][z] - kSymmetricZeroPoint;
        }
      }
      // Emit the six 16-byte filter banks: rows 0..2, sub-block a then b.
      memcpy(shuffled_filter_data, filter_bank_a_0, 16);
      shuffled_filter_data += 16;
      memcpy(shuffled_filter_data, filter_bank_b_0, 16);
      shuffled_filter_data += 16;
      memcpy(shuffled_filter_data, filter_bank_a_1, 16);
      shuffled_filter_data += 16;
      memcpy(shuffled_filter_data, filter_bank_b_1, 16);
      shuffled_filter_data += 16;
      memcpy(shuffled_filter_data, filter_bank_a_2, 16);
      shuffled_filter_data += 16;
      memcpy(shuffled_filter_data, filter_bank_b_2, 16);
      shuffled_filter_data += 16;
      int32 adjusted_bias_data_0[4];
      int32 adjusted_bias_data_1[4];
      // For instance, if input_offset == 128, no adjustment is needed.
      for (int z = 0; z < 4; ++z) {
        adjusted_bias_data_0[z] = bias_data[z];
        adjusted_bias_data_1[z] = bias_data[4 + z];
        for (int x = 0; x < 4; ++x) {
          adjusted_bias_data_0[z] +=
              input_offset_difference * filter_bank_a_0[z][x];
          adjusted_bias_data_0[z] +=
              input_offset_difference * filter_bank_a_1[z][x];
          adjusted_bias_data_0[z] +=
              input_offset_difference * filter_bank_a_2[z][x];
          adjusted_bias_data_1[z] +=
              input_offset_difference * filter_bank_b_0[z][x];
          adjusted_bias_data_1[z] +=
              input_offset_difference * filter_bank_b_1[z][x];
          adjusted_bias_data_1[z] +=
              input_offset_difference * filter_bank_b_2[z][x];
        }
        // Store once per depth position, after the accumulation over x is
        // complete. (Previously these stores sat inside the x loop and were
        // redundantly rewritten with partial sums; only the final write was
        // observable, so hoisting them is behavior-identical.)
        adjusted_bias_data[z] = adjusted_bias_data_0[z];
        adjusted_bias_data[4 + z] = adjusted_bias_data_1[z];
      }
      bias_data += 2 * bias_increment;
      adjusted_bias_data += 8;
    }
  }
};
#ifdef USE_NEON
// NEON-intrinsics implementation of the per-depth pre-processing stage:
// shuffles 3x3 filter data into the dot-product bank layout and folds the
// input offset into the bias, handling 8 depth channels (two sub-blocks of 4)
// per loop iteration.
template <QuantizationType quantization_type>
struct ProcessPerDepth<DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
                       quantization_type> {
  static void ProcessPerDepthIntrinsics(
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          filter_data,
      const int32* bias_data, int8* shuffled_filter_data,
      int32* adjusted_bias_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int depth = function_params->output_depth;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int bias_increment = function_params->bias_increment;
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    const int32 input_offset = function_params->input_offset;
    // The offset range check only applies to the uint8 quantization flavor.
    if (quantization_type == QuantizationType::kNonPerChannelUint8) {
      TFLITE_DCHECK_GE(input_offset, -255);
      TFLITE_DCHECK_LE(input_offset, 0);
    }
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    // Used with vdotq_s32 below to sum the filter taps per output channel.
    const int8x16_t ones_vector = vdupq_n_s8(1);
    // Simulate NEON-register transposition of subset of filter.
    // NOTE(review): the input_* registers are written only via lane-0 loads
    // below; the upper 64 bits are never consumed by the vzip1q_s8 calls.
    int8x16_t input_0_a;
    int8x16_t input_0_b;
    int8x16_t input_0_c;
    int8x16_t input_1_a;
    int8x16_t input_1_b;
    int8x16_t input_1_c;
    int8x16_t input_2_a;
    int8x16_t input_2_b;
    int8x16_t input_2_c;
    int8x16_t filter_0_a;
    int8x16_t filter_0_b;
    int8x16_t filter_1_a;
    int8x16_t filter_1_b;
    int8x16_t filter_2_a;
    int8x16_t filter_2_b;
    // For uint8, effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    const typename QuantizationTypeImpl<quantization_type>::ExternalType*
        filter_block = filter_data;
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Filter data is provided as filter_block[3][3][depth/8][2][4].
      // height 3, width 3, micro-blocks, sub-block 0 or 1, depth 4.
      // filter_bank[3][2][4][4]; Sub-block, height 3, depth 4, width 4.
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          filter_block_ptr = filter_block;
      // Load the nine 8-byte filter taps (3 rows x 3 columns) for this
      // micro block; consecutive taps are `depth` bytes apart.
      input_0_a = vld1q_lane_s8x8(filter_block_ptr, input_0_a, 0);
      filter_block_ptr += depth;
      input_0_b = vld1q_lane_s8x8(filter_block_ptr, input_0_b, 0);
      filter_block_ptr += depth;
      input_0_c = vld1q_lane_s8x8(filter_block_ptr, input_0_c, 0);
      filter_block_ptr += depth;
      input_1_a = vld1q_lane_s8x8(filter_block_ptr, input_1_a, 0);
      filter_block_ptr += depth;
      input_1_b = vld1q_lane_s8x8(filter_block_ptr, input_1_b, 0);
      filter_block_ptr += depth;
      input_1_c = vld1q_lane_s8x8(filter_block_ptr, input_1_c, 0);
      filter_block_ptr += depth;
      input_2_a = vld1q_lane_s8x8(filter_block_ptr, input_2_a, 0);
      filter_block_ptr += depth;
      input_2_b = vld1q_lane_s8x8(filter_block_ptr, input_2_b, 0);
      filter_block_ptr += depth;
      input_2_c = vld1q_lane_s8x8(filter_block_ptr, input_2_c, 0);
      // Interleave rows pairwise; the fourth (padding) column is filled with
      // sign_bit so it zeroes out after the XOR below.
      filter_0_a = vzip1q_s8(input_0_a, input_0_b);
      filter_0_b = vzip1q_s8(input_0_c, sign_bit);
      filter_1_a = vzip1q_s8(input_1_a, input_1_b);
      filter_1_b = vzip1q_s8(input_1_c, sign_bit);
      filter_2_a = vzip1q_s8(input_2_a, input_2_b);
      filter_2_b = vzip1q_s8(input_2_c, sign_bit);
      // uint8 flavor: XOR with 0x80 implements subtract-128 (zero point).
      if (quantization_type == QuantizationType::kNonPerChannelUint8) {
        filter_0_a = veorq_s8(filter_0_a, sign_bit);
        filter_0_b = veorq_s8(filter_0_b, sign_bit);
        filter_1_a = veorq_s8(filter_1_a, sign_bit);
        filter_1_b = veorq_s8(filter_1_b, sign_bit);
        filter_2_a = veorq_s8(filter_2_a, sign_bit);
        filter_2_b = veorq_s8(filter_2_b, sign_bit);
      }
      // Complete the transposition into bank layout.
      vzipq_s8x2_in_place(&filter_0_a, &filter_0_b);
      vzipq_s8x2_in_place(&filter_1_a, &filter_1_b);
      vzipq_s8x2_in_place(&filter_2_a, &filter_2_b);
      // Emit the six 16-byte filter banks.
      vst1q_s8(shuffled_filter_data, filter_0_a);
      shuffled_filter_data += 16;
      vst1q_s8(shuffled_filter_data, filter_0_b);
      shuffled_filter_data += 16;
      vst1q_s8(shuffled_filter_data, filter_1_a);
      shuffled_filter_data += 16;
      vst1q_s8(shuffled_filter_data, filter_1_b);
      shuffled_filter_data += 16;
      vst1q_s8(shuffled_filter_data, filter_2_a);
      shuffled_filter_data += 16;
      vst1q_s8(shuffled_filter_data, filter_2_b);
      shuffled_filter_data += 16;
      int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
      bias_data += bias_increment;
      int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
      bias_data += bias_increment;
      // For instance, if input_offset is kIntSymmetricZeroPoint, no adjustment
      // is needed.
      // Sum all filter taps per channel via dot products with a ones vector,
      // then fold input_offset_difference * sum into the bias.
      int32x4_t filter_sum_a = vdupq_n_s32(0);
      filter_sum_a = vdotq_s32(filter_sum_a, filter_0_a, ones_vector);
      filter_sum_a = vdotq_s32(filter_sum_a, filter_1_a, ones_vector);
      filter_sum_a = vdotq_s32(filter_sum_a, filter_2_a, ones_vector);
      int32x4_t filter_sum_b = vdupq_n_s32(0);
      filter_sum_b = vdotq_s32(filter_sum_b, filter_0_b, ones_vector);
      filter_sum_b = vdotq_s32(filter_sum_b, filter_1_b, ones_vector);
      filter_sum_b = vdotq_s32(filter_sum_b, filter_2_b, ones_vector);
      adjusted_bias_data_a = vmlaq_n_s32(adjusted_bias_data_a, filter_sum_a,
                                         input_offset_difference);
      adjusted_bias_data_b = vmlaq_n_s32(adjusted_bias_data_b, filter_sum_b,
                                         input_offset_difference);
      vst1q_s32(adjusted_bias_data, adjusted_bias_data_a);
      adjusted_bias_data += 4;
      vst1q_s32(adjusted_bias_data, adjusted_bias_data_b);
      adjusted_bias_data += 4;
      filter_block += 8;
    }
  }
  // Entry point; forwards to the intrinsics implementation.
  static inline void Run(const typename QuantizationTypeImpl<
                             quantization_type>::ExternalType* filter_data,
                         const int32* bias_data, int8* shuffled_filter_data,
                         int32* adjusted_bias_data,
                         const DepthwiseConvDotProdParams* function_params) {
    ProcessPerDepthIntrinsics(filter_data, bias_data, shuffled_filter_data,
                              adjusted_bias_data, function_params);
  }
};
#endif
// Reference ("C model") packing of a macro block of input into the scratch
// workspace for the dot-product kernels: copies with zero-point adjustment
// and padding fill, then transposes 4x4 blocks into the mangled layout.
template <QuantizationType quantization_type, int32 max_padding>
struct PackMacroBlock<
    DepthwiseConvImplementation::kUseCModel3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kNoMultiplication, max_padding> {
  // A straight copy of a macro block of input data into a scratch buffer.
  //
  // Requirement: depth_micro_repeats > 0.
  static inline void CopyMacroBlock(
      int32 height_block_number, int32 width_block_number,
      const DepthwiseConvDotProdParams& function_params,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data) {
    // Currently support for padding is limited to 1 on any side.
    TFLITE_DCHECK_LE(max_padding, 1);
    // Strides.
    // The input depth and count of micro blocks provide the width strides.
    const int input_height_stride = function_params.input_height_stride;
    const int workspace_height_stride = function_params.workspace_height_stride;
    const int input_depth = function_params.input_depth;
    const int depth_micro_repeats = function_params.depth_micro_repeats;
    TFLITE_DCHECK_GT(depth_micro_repeats, 0);
    // Remaining iteration and dimension parameters.
    //
    // If width_overall_micro_repeats = input_width_micro_repeats + 1, then the
    // final micro block is incomplete.
    const int width_overall_micro_repeats =
        function_params.input_width_overall_micro_repeats;
    int input_width_micro_repeats = function_params.input_width_micro_repeats;
    const int residual_width = function_params.residual_width;
    const int block_height = function_params.inbound_block_height;
    const int padding_left = function_params.padding_left;
    const int padding_right = function_params.padding_right;
    const int padding_top = function_params.padding_top;
    const int padding_bottom = function_params.padding_bottom;
    // Padding only applies to the outermost macro blocks of the input.
    const bool leading_width_padding =
        padding_left > 0 && width_block_number == 0;
    const bool trailing_width_padding =
        padding_right > 0 &&
        width_block_number == (function_params.width_macro_count - 1);
    const bool leading_height_padding =
        padding_top > 0 && height_block_number < 0;
    const bool trailing_height_padding =
        padding_bottom > 0 &&
        height_block_number == (function_params.height_macro_count - 1);
    // Modify the trailing case to reflect the input width.
    int input_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    if (trailing_width_padding) {
      input_residual_width -= 1;
      input_width_micro_repeats = width_overall_micro_repeats - 1;
    }
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    // Padding bytes are written as -input_offset_difference so that, after
    // the kernels re-add the input offset, they represent zero.
    const int32 input_offset_difference =
        function_params.input_offset + kSymmetricZeroPoint;
    // We load data into a temporary buffer and then save, to match subsequent
    // processing. This will make it easier to combine stages into one ASM
    // routine.
    int8 tmp_load[4][2][4];
    int copy_block_height = block_height;
    if (leading_height_padding) {
      memset(scratch_block_data, -input_offset_difference,
             workspace_height_stride);
      scratch_block_data += workspace_height_stride;
      input_block_data += input_height_stride;
      copy_block_height -= 1;
    }
    if (trailing_height_padding) {
      copy_block_height -= 1;
    }
    // The outer 3 loops go through all the micro blocks in a macro block.
    for (int k_height = 0; k_height < copy_block_height; ++k_height) {
      for (int j_width = 0; j_width < width_overall_micro_repeats; ++j_width) {
        // Figure out division of work (available input vs trailing padding).
        int adjusted_residual_width =
            j_width == input_width_micro_repeats ? input_residual_width : 4;
        int start_width = 0;
        if (leading_width_padding && j_width == 0) {
          start_width = 1;
          memset(tmp_load[0][0], -input_offset_difference, 8);
        }
        if (adjusted_residual_width < 4) {
          for (int x = adjusted_residual_width; x < 4; ++x) {
            memset(tmp_load[x][0], -input_offset_difference, 8);
          }
        }
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          // The inner 3 loops go through the sub-block, depth and width within
          // each micro block.
          // Load, and apply symmetric offset.
          int8* scratch_data =
              scratch_block_data + k_height * workspace_height_stride +
              j_width * 4 * 8 + i_depth * 4 * 8 * width_overall_micro_repeats;
          const typename QuantizationTypeImpl<quantization_type>::ExternalType*
              input_data = input_block_data + k_height * input_height_stride +
                           j_width * 4 * input_depth + i_depth * 8;
          // Full-size macro blocks are 2*4*4 = 32 bytes.
          for (int x = start_width; x < adjusted_residual_width; ++x) {
            for (int s = 0; s < 2; ++s) {
              for (int d = 0; d < 4; ++d) {
                tmp_load[x][s][d] = input_data[x * input_depth + 4 * s + d] -
                                    kSymmetricZeroPoint;
              }
            }
          }
          // Save results.
          memcpy(&scratch_data[0], tmp_load[0][0], 8);
          memcpy(&scratch_data[8], tmp_load[1][0], 8);
          memcpy(&scratch_data[16], tmp_load[2][0], 8);
          memcpy(&scratch_data[24], tmp_load[3][0], 8);
        }
      }
    }
    if (trailing_height_padding) {
      // Fill the bottom padding row with the representation of zero.
      memset(scratch_block_data + copy_block_height * workspace_height_stride,
             -input_offset_difference, workspace_height_stride);
    }
  }
  // Transpose 4x4 blocks within each sub-micro-block.
  //
  // Implemented somewhat like NEON register manipulation, so that we can see
  // equivalence of the two approaches.
  static inline void MicroTransposeBlocks(
      const DepthwiseConvDotProdParams& function_params,
      int8* scratch_block_data) {
    const int workspace_height_stride = function_params.workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params.input_width_overall_micro_repeats;
    const int depth_micro_repeats = function_params.depth_micro_repeats;
    const int block_height = function_params.inbound_block_height;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in the
    // NEON code we are simulating.
    int8 tmp_load[4][2][4];          // [width][sub-block][depth]
    int8 tmp_transposed[4][2][4];    // [depth][sub-block][width]
    int8 tmp_interleaved[2][4][4];   // [sub-block][depth][width]
    // The outer 3 loops go through all the micro blocks in a macro block.
    for (int k_height = 0; k_height < block_height; ++k_height) {
      for (int j_width = 0; j_width < width_overall_micro_repeats; ++j_width) {
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          int8* scratch_data =
              scratch_block_data + k_height * workspace_height_stride +
              j_width * 4 * 8 + i_depth * 4 * 8 * width_overall_micro_repeats;
          // A. Load data
          memcpy(tmp_load[0][0], &scratch_data[0], 8);
          memcpy(tmp_load[1][0], &scratch_data[8], 8);
          memcpy(tmp_load[2][0], &scratch_data[16], 8);
          memcpy(tmp_load[3][0], &scratch_data[24], 8);
          // B. Simulate between-register transposition.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_transposed[x][0][y] = tmp_load[y][0][x];
              tmp_transposed[x][1][y] = tmp_load[y][1][x];
            }
          }
          // C. Simulate between-register interleaving.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_interleaved[0][x][y] = tmp_transposed[x][0][y];
              tmp_interleaved[1][x][y] = tmp_transposed[x][1][y];
            }
          }
          // D. Simulate mangled storage arrangement.
          memcpy(&scratch_data[0], tmp_interleaved[0][0], 16);
          memcpy(&scratch_data[16], tmp_interleaved[1][0], 16);
        }
      }
    }
  }
  // Entry point: copy (with padding / zero-point handling) then transpose.
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    CopyMacroBlock(height_block_number, width_block_number, *function_params,
                   input_block_data, scratch_block_data);
    MicroTransposeBlocks(*function_params, scratch_block_data);
  }
};
// Reference ("C model") macro-block packing for the depth-multiplication
// case with unit input depth: input rows are contiguous, so the copy is a
// single row-wise memcpy plus zero-point adjustment and padding fill. No
// micro-transposition is needed.
template <QuantizationType quantization_type, int32 max_padding>
struct PackMacroBlock<
    DepthwiseConvImplementation::kUseCModel3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kUnitInputDepth, max_padding> {
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Currently support for padding is limited to 1 on any side.
    TFLITE_DCHECK_LE(max_padding, 1);
    // Strides.
    // The count of micro blocks (below) provides the width strides.
    const int input_height_stride = function_params->input_height_stride;
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    // Remaining iteration and dimension parameters.
    //
    // If width_overall_micro_repeats = input_width_micro_repeats + 1, then the
    // final micro block is incomplete.
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int residual_width = function_params->residual_width;
    const int block_height = function_params->inbound_block_height;
    TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
    const int padding_left = function_params->padding_left;
    const int padding_right = function_params->padding_right;
    const int padding_top = function_params->padding_top;
    const int padding_bottom = function_params->padding_bottom;
    // Padding only applies to the outermost macro blocks of the input.
    const bool leading_width_padding =
        padding_left > 0 && width_block_number == 0;
    const bool trailing_width_padding =
        padding_right > 0 &&
        width_block_number == (function_params->width_macro_count - 1);
    const bool leading_height_padding =
        padding_top > 0 && height_block_number < 0;
    const bool trailing_height_padding =
        padding_bottom > 0 &&
        height_block_number == (function_params->height_macro_count - 1);
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    // Padding bytes are written as -input_offset_difference so that, after
    // the kernels re-add the input offset, they represent zero.
    const int32 input_offset_difference =
        function_params->input_offset + kSymmetricZeroPoint;
    int copy_block_height = block_height;
    if (leading_height_padding) {
      memset(scratch_block_data, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_block_data += workspace_height_stride;
      input_block_data += input_height_stride;
      copy_block_height -= 1;
    }
    if (trailing_height_padding) {
      copy_block_height -= 1;
    }
    int adjusted_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    if (trailing_width_padding) {
      adjusted_residual_width -= 1;
    }
    int start_width = 0;
    if (leading_width_padding) {
      start_width = 1;
      input_block_data += 1;
    }
    // Bytes of real input copied per row (excludes any padding columns).
    const int copy_size = (width_overall_micro_repeats - 1) * 4 +
                          adjusted_residual_width - start_width;
    TFLITE_DCHECK_LE(
        copy_size,
        input_height_stride - width_block_number * input_width_micro_repeats);
    // We may drop up to stride-1 of trailing input.
    TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
    // When there is unit input depth, the micro-block iteration need only be
    // through the height. The micro blocks are contiguous across the width.
    for (int k_height = 0; k_height < copy_block_height; ++k_height) {
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_data = input_block_data + k_height * input_height_stride;
      int8* scratch_data =
          scratch_block_data + k_height * workspace_height_stride;
      // Handle leading padding. This is overwritten if there is no padding.
      scratch_data[0] = -input_offset_difference;
      memcpy(&scratch_data[start_width], input_data, copy_size);
      // Apply the symmetric zero-point shift in place after the raw copy.
      for (int i = 0; i < copy_size; ++i) {
        scratch_data[start_width + i] += -kSymmetricZeroPoint;
      }
      // Handle trailing padding, and fill in remainder of micro block.
      memset(&scratch_data[start_width + copy_size], -input_offset_difference,
             4 - adjusted_residual_width + kWorkspaceExtension);
    }
    if (trailing_height_padding) {
      memset(scratch_block_data + copy_block_height * workspace_height_stride,
             -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
    }
  }
};
// Beginning of code section containing intermediate code transformation.
//
// This section is only compiled when kUseUnwound3x3DotProduct versions of
// templated functions are selected.
// "Unwound" macro-block packing for the no-padding, no-depth-multiplication
// case: copy and 4x4 micro-transpose are fused into a single pass over the
// input, written to mirror the register usage of the NEON implementation.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseUnwound3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kNoMultiplication,
                      /*max_padding=*/0> {
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int input_depth = function_params->input_depth;
    TFLITE_DCHECK_GE(depth_micro_repeats, 0);
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    const int micro_block_size = 4 * 8;
    // Pointer deltas that let the inner loops advance scratch_data without
    // recomputing full offsets: along depth, back across a width step, and
    // to the next workspace row.
    const int depth_advance = width_overall_micro_repeats * micro_block_size;
    const int width_advance =
        micro_block_size *
        (1 - depth_micro_repeats * width_overall_micro_repeats);
    const int height_advance = workspace_height_stride -
                               width_overall_micro_repeats * micro_block_size;
    const int input_depth_skip = 4 * input_depth - 8 * depth_micro_repeats;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in the
    // NEON code we are simulating. Note the blocks of 4x4 are still interleaved
    // down the depth.
    int8 tmp_load[4][2][4];
    int8 tmp_transposed[4][2][4];
    int8 tmp_interleaved[2][4][4];
    // Work through one slice, by row, at a time.
    int8* scratch_data = scratch_block_data;
    for (int k_height = 0; k_height < block_height; ++k_height) {
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_data = input_block_data;
      input_block_data += input_height_stride;
      // Traverse the width one point at a time, but the depth in (micro) blocks
      // of size 8.
      //
      // The depth and width margins, which are filled with "zeros", may be
      // larger than is strictly needed to calculate output. This is because the
      // conv calculation is performed across complete micro blocks.
      for (int j_width = 0; j_width < input_width_micro_repeats; ++j_width) {
        // Load, then zero.
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          // A. Simulate register loading.
          for (int x = 0; x < 4; ++x) {
            for (int s = 0; s < 2; ++s) {
              for (int d = 0; d < 4; ++d) {
                tmp_load[x][s][d] = input_data[x * input_depth + 4 * s + d] -
                                    kSymmetricZeroPoint;
              }
            }
          }
          // B. Simulate between-register transposition.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_transposed[x][0][y] = tmp_load[y][0][x];
              tmp_transposed[x][1][y] = tmp_load[y][1][x];
            }
          }
          // C and D are to be performed together as 4-byte stores in NEON code.
          // C. Simulate between-register interleaving.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_interleaved[0][x][y] = tmp_transposed[x][0][y];
              tmp_interleaved[1][x][y] = tmp_transposed[x][1][y];
            }
          }
          // D. Simulate mangled storage arrangement.
          memcpy(&scratch_data[0], tmp_interleaved[0][0], 8);
          memcpy(&scratch_data[8], tmp_interleaved[0][2], 8);
          memcpy(&scratch_data[16], tmp_interleaved[1][0], 8);
          memcpy(&scratch_data[24], tmp_interleaved[1][2], 8);
          scratch_data += depth_advance;
          input_data += 8;
        }
        scratch_data += width_advance;
        input_data += input_depth_skip;
      }
      // Residual (partial-width) final micro block, if any.
      if (width_overall_micro_repeats > input_width_micro_repeats) {
        TFLITE_DCHECK_EQ(width_overall_micro_repeats,
                         input_width_micro_repeats + 1);
        TFLITE_DCHECK_GT(residual_width, 0);
        // Figure out division of work (available input vs zero-ed).
        const int adjusted_residual_width = residual_width;
        // Load, then zero.
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          // A. Simulate register loading.
          for (int x = 0; x < adjusted_residual_width; ++x) {
            for (int s = 0; s < 2; ++s) {
              for (int d = 0; d < 4; ++d) {
                tmp_load[x][s][d] = input_data[x * input_depth + 4 * s + d] -
                                    kSymmetricZeroPoint;
              }
            }
          }
          // Zero-fill the width positions beyond the residual width.
          for (int x = adjusted_residual_width; x < 4; ++x) {
            for (int s = 0; s < 2; ++s) {
              for (int d = 0; d < 4; ++d) {
                tmp_load[x][s][d] = 0;
              }
            }
          }
          // B. Simulate between-register transposition.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_transposed[x][0][y] = tmp_load[y][0][x];
              tmp_transposed[x][1][y] = tmp_load[y][1][x];
            }
          }
          // C and D are to be performed together as 4-byte stores in NEON code.
          // C. Simulate between-register interleaving.
          for (int x = 0; x < 4; ++x) {
            for (int y = 0; y < 4; ++y) {
              tmp_interleaved[0][x][y] = tmp_transposed[x][0][y];
              tmp_interleaved[1][x][y] = tmp_transposed[x][1][y];
            }
          }
          // D. Simulate mangled storage arrangement.
          memcpy(&scratch_data[0], tmp_interleaved[0][0], 8);
          memcpy(&scratch_data[8], tmp_interleaved[0][2], 8);
          memcpy(&scratch_data[16], tmp_interleaved[1][0], 8);
          memcpy(&scratch_data[24], tmp_interleaved[1][2], 8);
          scratch_data += depth_advance;
          input_data += 8;
        }
        scratch_data += width_advance;
        input_data += input_depth_skip;
      }
      scratch_data += height_advance;
    }
    // All rows written: the pointer walk must land exactly at the end of the
    // packed block.
    TFLITE_DCHECK_EQ(scratch_data, scratch_block_data +
                                       block_height * workspace_height_stride);
  }
};
// Unwound-implementation input packing for the padded, no-depth-multiplication
// case. No specialized unwound code exists for padding, so this simply
// delegates to the C-model specialization, which handles padding correctly.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseUnwound3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kNoMultiplication,
                      /*max_padding=*/1> {
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    // Delegate to the C-model code path; optimized versions would merge the
    // padding handling found there into the unwound structure.
    using CModelPackMacroBlock =
        PackMacroBlock<DepthwiseConvImplementation::kUseCModel3x3DotProduct,
                       quantization_type,
                       DepthwiseConvDepthMultiplication::kNoMultiplication,
                       /*max_padding=*/1>;
    CModelPackMacroBlock::Run(height_block_number, width_block_number,
                              input_block_data, scratch_block_data,
                              function_params);
  }
};
// Unit-input-depth input packing for the "unwound" (intermediate) 3x3
// dot-product code path, supporting padding on any side. The scalar code
// below deliberately simulates register-sized (16-byte and 4-byte) loads and
// stores so that its structure mirrors the optimized NEON versions.
template <QuantizationType quantization_type, int32 max_padding>
struct PackMacroBlock<
    DepthwiseConvImplementation::kUseUnwound3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kUnitInputDepth, max_padding> {
  // Copies one macro block of input into scratch_block_data, shifting values
  // by the symmetric zero point and filling padded regions with
  // -input_offset_difference (the representation of a zero input).
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int padding_left = function_params->padding_left;
    const int padding_right = function_params->padding_right;
    const int padding_top = function_params->padding_top;
    const int padding_bottom = function_params->padding_bottom;
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
    // Padding only applies at the outer edges of the input: the first/last
    // width macro block and the boundary height block numbers.
    const bool leading_width_padding =
        padding_left > 0 && width_block_number == 0;
    const bool trailing_width_padding =
        padding_right > 0 &&
        width_block_number == (function_params->width_macro_count - 1);
    const bool leading_height_padding =
        padding_top > 0 && height_block_number < 0;
    const bool trailing_height_padding =
        padding_bottom > 0 &&
        height_block_number == (function_params->height_macro_count - 1);
    const int32 input_offset = function_params->input_offset;
    // Byte value written into padded scratch regions: a zero input after the
    // symmetric zero-point shift.
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    // Work through one slice, by row, at a time.
    int8* scratch_data_base = scratch_block_data;
    int copy_block_height = block_height;
    if (leading_height_padding) {
      copy_block_height -= 1;
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
      input_block_data += input_height_stride;
    }
    if (trailing_height_padding) {
      copy_block_height -= 1;
    }
    int adjusted_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    if (trailing_width_padding) {
      adjusted_residual_width -= 1;
    }
    int start_width = 0;
    if (leading_width_padding) {
      start_width = 1;
      input_block_data += 1;
    }
    // Number of real input bytes to copy per row (excludes padded positions).
    const int copy_size = (width_overall_micro_repeats - 1) * 4 +
                          adjusted_residual_width - start_width;
    // Adjusted so that later conditionals are simplified.
    const int copy_size_adjusted =
        trailing_width_padding ? copy_size + 1 : copy_size;
    TFLITE_DCHECK_LE(
        copy_size,
        input_height_stride - width_block_number * input_width_micro_repeats);
    // We may drop up to stride-1 of trailing input.
    TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
    // This is used to simulate what should happen in registers.
    int8 tmp_data[16];
    int scratch_data_offset = 0;
    int input_block_offset = 0;
    // Each branch below simulates a different register strategy: full 16-byte
    // copies, 4-byte copies, or shift-insert handling for very short rows.
    if (copy_size >= 16) {
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          memcpy(tmp_data + 1, input_block_data + input_block_offset, 15);
          for (int i = 0; i < 16; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          tmp_data[0] = -input_offset_difference;
          memcpy(scratch_data, tmp_data, 16);
          copy_done += 15;
        }
        // Main copy loop.
        for (; (copy_done + 16) <= copy_size; copy_done += 16) {
          memcpy(tmp_data, input_block_data + input_block_offset + copy_done,
                 16);
          for (int i = 0; i < 16; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 16, 0);
          memcpy(&scratch_data[start_width + copy_done], tmp_data, 16);
        }
        const int copy_remaining = copy_size - copy_done;
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          memcpy(tmp_data,
                 input_block_data + input_block_offset + copy_done -
                     (16 - copy_remaining),
                 16);
          // Shift to select the part that we need.
          for (int i = 0; i < copy_remaining; ++i) {
            tmp_data[i] = tmp_data[(16 - copy_remaining) + i];
          }
          for (int i = 0; i < 16; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          // Apply padding to remainder, some unnecessary but costless in regs.
          for (int i = copy_remaining; i < 16; ++i) {
            tmp_data[i] = -input_offset_difference;
          }
          const int final_repeats =
              width_overall_micro_repeats - (start_width + copy_done) / 4;
          for (int i = 0; i < final_repeats; ++i) {
            memcpy(&scratch_data[start_width + copy_done], tmp_data + 4 * i, 4);
            copy_done += 4;
          }
        }
        memset(scratch_data + start_width + copy_done, -input_offset_difference,
               kWorkspaceExtension);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (copy_size >= 4) {
      // 4-byte-register analogue of the 16-byte loop above.
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          memcpy(tmp_data + 1, input_block_data + input_block_offset, 3);
          for (int i = 0; i < 4; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          tmp_data[0] = -input_offset_difference;
          memcpy(scratch_data, tmp_data, 4);
          copy_done += 3;
        }
        for (; (copy_done + 4) <= copy_size; copy_done += 4) {
          memcpy(tmp_data, input_block_data + input_block_offset + copy_done,
                 4);
          for (int i = 0; i < 4; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          // Perform as 4 int32 stores, because that is our alignment.
          memcpy(&scratch_data[start_width + copy_done], tmp_data, 4);
        }
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        const int copy_remaining = copy_size - copy_done;
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          TFLITE_DCHECK_LT(copy_remaining, 4);
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          memcpy(tmp_data,
                 input_block_data + input_block_offset + copy_done -
                     (4 - copy_remaining),
                 4);
          // Shift to select the part that we need.
          for (int i = 0; i < copy_remaining; ++i) {
            tmp_data[i] = tmp_data[(4 - copy_remaining) + i];
          }
          for (int i = 0; i < 4; ++i) {
            tmp_data[i] += -kSymmetricZeroPoint;
          }
          // Apply padding to remainder, some unnecessary but costless in regs.
          for (int i = copy_remaining; i < 4; ++i) {
            tmp_data[i] = -input_offset_difference;
          }
          memcpy(&scratch_data[start_width + copy_done], tmp_data, 4);
          copy_done += 4;
        }
        memset(scratch_data + start_width + copy_done, -input_offset_difference,
               kWorkspaceExtension);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (width_overall_micro_repeats == 2) {
      // Very short rows: simulate a shift-left-insert register strategy.
      // NOTE: the inner loop variable deliberately shadows the outer |i|.
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Apply padding by quick fill of whole reg.
        for (int i = 0; i < 8; ++i) {
          tmp_data[i] = -input_offset;
        }
        for (int i = 0; i < copy_size; ++i) {
          // Apply shift-left insert, tmp_data as both operands.
          // The zero-index byte is left unchanged.
          for (int i = 7; i > 0; --i) {
            tmp_data[i] = tmp_data[i - 1];
          }
          tmp_data[1] =
              input_block_data[input_block_offset + (copy_size - 1 - i)];
        }
        if (!leading_width_padding) {
          // Remove leading padding, junking trailing byte, OK because max size
          // is less than 8.
          TFLITE_DCHECK_LT(copy_size_adjusted + start_width, 8);
          for (int i = 0; i < 7; ++i) {
            tmp_data[i] = tmp_data[i + 1];
          }
        }
        for (int i = 0; i < 8; ++i) {
          tmp_data[i] += -kSymmetricZeroPoint;
        }
        memcpy(scratch_data_base + scratch_data_offset, tmp_data, 8);
        memset(scratch_data_base + scratch_data_offset + 8,
               -input_offset_difference, kWorkspaceExtension);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else {
      TFLITE_DCHECK_EQ(width_overall_micro_repeats, 1);
      // This path is basically the same as the preceding, 2-micro-block one,
      // but here we simply store fewer bytes.
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Apply padding by quick fill of whole reg.
        for (int i = 0; i < 8; ++i) {
          tmp_data[i] = -input_offset;
        }
        for (int i = 0; i < copy_size; ++i) {
          // Apply shift-left insert, tmp_data as both operands.
          // The zero-index byte is left unchanged.
          for (int i = 7; i > 0; --i) {
            tmp_data[i] = tmp_data[i - 1];
          }
          tmp_data[1] =
              input_block_data[input_block_offset + (copy_size - 1 - i)];
        }
        if (!leading_width_padding) {
          // Remove leading padding, junking trailing byte, OK because max size
          // is less than 8.
          TFLITE_DCHECK_LT(copy_size_adjusted + start_width, 8);
          for (int i = 0; i < 7; ++i) {
            tmp_data[i] = tmp_data[i + 1];
          }
        }
        for (int i = 0; i < 8; ++i) {
          tmp_data[i] += -kSymmetricZeroPoint;
        }
        memcpy(scratch_data_base + scratch_data_offset, tmp_data, 4);
        memset(scratch_data_base + scratch_data_offset + 4,
               -input_offset_difference, kWorkspaceExtension);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    }
    scratch_data_base += copy_block_height * workspace_height_stride;
    if (trailing_height_padding) {
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
    }
    // Sanity check: we must have written exactly the whole scratch block.
    TFLITE_DCHECK_EQ(
        scratch_data_base,
        scratch_block_data + block_height * workspace_height_stride);
  }
};
// The preceding section is only compiled when kUseUnwound3x3DotProduct versions
// of templated functions are selected.
//
// End of code section containing intermediate code transformation.
#ifdef USE_NEON
// NEON-intrinsics input packing, no depth multiplication and no padding
// (max_padding=0). Input is rearranged into the interleaved scratch layout
// expected by the dot-product kernels.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kNoMultiplication,
                      /*max_padding=*/0> {
  // Packs one macro block: loads 4 width positions x 8 depth values at a
  // time, zips them into the mangled scratch ordering, and (for uint8 data)
  // applies the zero-point shift via a sign-bit XOR.
  static inline void PackMacroBlockIntrinsics(
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    TFLITE_DCHECK_EQ(function_params->padding_bottom, 0);
    TFLITE_DCHECK_EQ(function_params->padding_top, 0);
    TFLITE_DCHECK_EQ(function_params->padding_left, 0);
    TFLITE_DCHECK_EQ(function_params->padding_right, 0);
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int input_depth = function_params->input_depth;
    TFLITE_DCHECK_GE(depth_micro_repeats, 0);
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    // One micro block is 4 width positions x 8 depth values.
    const int micro_block_size = 4 * 8;
    const int depth_advance = width_overall_micro_repeats * micro_block_size;
    // Rewind from the end of the depth traversal to the next width position.
    const int width_advance =
        micro_block_size *
        (1 - depth_micro_repeats * width_overall_micro_repeats);
    const int height_advance = workspace_height_stride -
                               width_overall_micro_repeats * micro_block_size;
    const int input_depth_skip = 4 * input_depth - 8 * depth_micro_repeats;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
    // code. Note the blocks of 4x4 are still interleaved down the depth.
    int8x16_t work_reg_a;
    int8x16_t work_reg_b;
    // Effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    // Work through one slice, by row, at a time.
    int8* scratch_data_0 = scratch_block_data;
    for (int k_height = 0; k_height < block_height; ++k_height) {
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_data_0 = input_block_data;
      int8x16_t input_data_a;
      int8x16_t input_data_b;
      int8x16_t input_data_c;
      int8x16_t input_data_d;
      // Traverse the width one point at a time, but the depth in (micro) blocks
      // of size 8.
      //
      // The depth and width margins, which are filled with "zeros", may be
      // larger than is strictly needed to calculate output. This is because the
      // conv calculation is performed across complete micro blocks.
      for (int j_width = 0; j_width < input_width_micro_repeats; ++j_width) {
        int8x16_t work_reg_a_sp;
        int8x16_t work_reg_b_sp;
        int i_depth = 0;
        // Software-pipelined 2-at-a-time depth loop: loads for the next
        // iteration are interleaved with zips/stores for the current one.
        if (depth_micro_repeats >= 2) {
          i_depth += 2;
          input_data_a = util_vld1q_x8(input_data_0);
          input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
          input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
          input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
          input_data_0 += 16;
          for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
            work_reg_a = vzip1q_s8(input_data_a, input_data_b);
            work_reg_b = vzip1q_s8(input_data_c, input_data_d);
            vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
            if (quantization_type == QuantizationType::kNonPerChannelUint8) {
              work_reg_a = veorq_s8(work_reg_a, sign_bit);
              work_reg_b = veorq_s8(work_reg_b, sign_bit);
            }
            work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
            work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
            vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
            input_data_a = util_vld1q_x8(input_data_0)
            input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
            vst1q_s8(scratch_data_0, work_reg_a);
            vst1q_s8(scratch_data_0 + 16, work_reg_b);
            scratch_data_0 += depth_advance;
            if (quantization_type == QuantizationType::kNonPerChannelUint8) {
              work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
              work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
            }
            input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
            input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
            vst1q_s8(scratch_data_0, work_reg_a_sp);
            vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
            scratch_data_0 += depth_advance;
            input_data_0 += 16;
          }
          // Epilogue for the pipelined loop: process the final pre-loaded
          // pair of depth micro blocks.
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
          work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
          work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
          vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
            work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
          }
          vst1q_s8(scratch_data_0, work_reg_a_sp);
          vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
          scratch_data_0 += depth_advance;
        }
        // Remaining (odd) depth micro block, one at a time.
        for (; i_depth < depth_micro_repeats; ++i_depth) {
          input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
          input_data_b =
              vld1q_lane_s8x8(input_data_0 + 1 * input_depth, input_data_b, 0);
          input_data_c =
              vld1q_lane_s8x8(input_data_0 + 2 * input_depth, input_data_c, 0);
          input_data_d =
              vld1q_lane_s8x8(input_data_0 + 3 * input_depth, input_data_d, 0);
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          input_data_0 += 8;
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
        }
        scratch_data_0 += width_advance;
        input_data_0 += input_depth_skip;
      }
      // Partial-width trailing micro block: load only residual_width
      // positions; missing positions are filled with the sign-bit value so
      // they become zero after the XOR shift.
      if (width_overall_micro_repeats > input_width_micro_repeats) {
        TFLITE_DCHECK_EQ(width_overall_micro_repeats,
                         input_width_micro_repeats + 1);
        TFLITE_DCHECK_GT(residual_width, 0);
        TFLITE_DCHECK_LT(residual_width, 4);
        for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
          input_data_c = vdupq_n_u8(kSignBit);
          input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
          input_data_d = vdupq_n_u8(kSignBit);
          if (residual_width > 1) {
            input_data_b =
                vld1q_lane_s8x8(input_data_0 + input_depth, input_data_b, 0);
            if (residual_width == 3) {
              input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
                                             input_data_c, 0);
            }
          }
          work_reg_a = vzip1q_s8(input_data_a, input_data_b);
          work_reg_b = vzip1q_s8(input_data_c, input_data_d);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg_a = veorq_s8(work_reg_a, sign_bit);
            work_reg_b = veorq_s8(work_reg_b, sign_bit);
          }
          vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
          vst1q_s8(scratch_data_0, work_reg_a);
          vst1q_s8(scratch_data_0 + 16, work_reg_b);
          scratch_data_0 += depth_advance;
          input_data_0 += 8;
        }
        scratch_data_0 += width_advance;
        input_data_0 += input_depth_skip;
      }
      scratch_data_0 += height_advance;
      input_block_data += input_height_stride;
    }
    // Sanity check: we must have written exactly the whole scratch block.
    TFLITE_DCHECK_EQ(
        scratch_data_0,
        scratch_block_data + block_height * workspace_height_stride);
  }
  // Entry point: optionally prefetches the input block, then packs it.
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
#ifdef __aarch64__
    PreloadInputBlock(input_block_data, function_params);
#endif
    PackMacroBlockIntrinsics(input_block_data, scratch_block_data,
                             function_params);
  }
};
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
quantization_type,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*max_padding=*/1> {
static inline void PackMacroBlockIntrinsics(
int32 height_block_number, int32 width_block_number,
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_block_data,
int8* scratch_block_data,
const DepthwiseConvDotProdParams* function_params) {
constexpr uint8 kSignBit =
QuantizationTypeImpl<quantization_type>::kUint8SignBit;
const int workspace_height_stride =
function_params->workspace_height_stride;
const int width_overall_micro_repeats =
function_params->input_width_overall_micro_repeats;
const int input_width_micro_repeats =
function_params->input_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int block_height = function_params->inbound_block_height;
const int residual_width = function_params->residual_width;
const int input_height_stride = function_params->input_height_stride;
const int input_depth = function_params->input_depth;
const int padding_left = function_params->padding_left;
const int padding_right = function_params->padding_right;
const int padding_top = function_params->padding_top;
const int padding_bottom = function_params->padding_bottom;
TFLITE_DCHECK_GT(depth_micro_repeats, 0);
constexpr int kSymmetricZeroPoint =
QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
const int micro_block_size = 4 * 8;
const int depth_advance = width_overall_micro_repeats * micro_block_size;
const int width_advance =
micro_block_size *
(1 - depth_micro_repeats * width_overall_micro_repeats);
const int height_advance = workspace_height_stride -
width_overall_micro_repeats * micro_block_size;
const int input_depth_skip = 4 * input_depth - 8 * depth_micro_repeats;
const bool leading_width_padding =
padding_left > 0 && width_block_number == 0;
const bool trailing_width_padding =
padding_right > 0 &&
width_block_number == (function_params->width_macro_count - 1);
const bool leading_height_padding =
padding_top > 0 && height_block_number < 0;
const bool trailing_height_padding =
padding_bottom > 0 &&
height_block_number == (function_params->height_macro_count - 1);
const int32 input_offset = function_params->input_offset;
const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
// Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
// code. Note the blocks of 4x4 are still interleaved down the depth.
int8x16_t work_reg_a;
int8x16_t work_reg_b;
// Effect subtraction of zero-point = 128 by XOR of sign bit.
const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
// Work through one slice, by row, at a time.
int8* scratch_data_0 = scratch_block_data;
int copy_block_height = block_height;
if (leading_height_padding) {
copy_block_height -= 1;
memset(scratch_data_0, -input_offset_difference, workspace_height_stride);
scratch_data_0 += workspace_height_stride;
input_block_data += input_height_stride;
}
if (trailing_height_padding) {
copy_block_height -= 1;
}
for (int k_height = 0; k_height < copy_block_height; ++k_height) {
const typename QuantizationTypeImpl<quantization_type>::ExternalType*
input_data_0 = input_block_data;
int8x16_t input_data_a;
int8x16_t input_data_b;
int8x16_t input_data_c;
int8x16_t input_data_d;
// Traverse the width one point at a time, but the depth in (micro) blocks
// of size 8.
//
// The depth and width margins, which are filled with "zeros", may be
// larger than is strictly needed to calculate output. This is because the
// conv calculation is performed across complete micro blocks.
for (int j_width = 0; j_width < width_overall_micro_repeats; ++j_width) {
// Figure out division of work (available input vs zero-ed).
int adjusted_residual_width =
j_width == (input_width_micro_repeats) ? residual_width : 4;
if (trailing_width_padding &&
j_width == (width_overall_micro_repeats - 1)) {
adjusted_residual_width -= 1;
}
int start_width = 0;
if (leading_width_padding && j_width == 0) {
start_width = 1;
}
if (start_width == 0) {
if (adjusted_residual_width == 4) {
int8x16_t work_reg_a_sp;
int8x16_t work_reg_b_sp;
int i_depth = 0;
if (depth_micro_repeats >= 2) {
i_depth += 2;
input_data_a = util_vld1q_x8(input_data_0);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
input_data_0 += 16;
for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
input_data_a = util_vld1q_x8(input_data_0);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
input_data_0 += 16;
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
}
for (; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
input_data_b = vld1q_lane_s8x8(input_data_0 + 1 * input_depth,
input_data_b, 0);
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
input_data_d = vld1q_lane_s8x8(input_data_0 + 3 * input_depth,
input_data_d, 0);
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
input_data_0 += 8;
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
} else {
TFLITE_DCHECK_LT(adjusted_residual_width, 4);
for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vdupq_n_u8(-input_offset);
input_data_c = vdupq_n_u8(-input_offset);
input_data_d = vdupq_n_u8(-input_offset);
if (adjusted_residual_width > 0) {
input_data_a = vld1q_lane_s8x8(input_data_0, input_data_a, 0);
if (adjusted_residual_width > 1) {
input_data_b = vld1q_lane_s8x8(input_data_0 + input_depth,
input_data_b, 0);
if (adjusted_residual_width == 3) {
input_data_c = vld1q_lane_s8x8(
input_data_0 + 2 * input_depth, input_data_c, 0);
}
}
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
input_data_0 += 8;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
}
} else {
if (adjusted_residual_width == 4) {
int8x16_t work_reg_a_sp;
int8x16_t work_reg_b_sp;
int i_depth = 0;
if (depth_micro_repeats >= 2) {
i_depth += 2;
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
input_data_0 += 16;
for (; i_depth < depth_micro_repeats - 1; i_depth += 2) {
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = util_vld1q_x8(input_data_0 + 1 * input_depth);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
if (quantization_type ==
QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
input_data_c = util_vld1q_x8(input_data_0 + 2 * input_depth);
input_data_d = util_vld1q_x8(input_data_0 + 3 * input_depth);
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
input_data_0 += 16;
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
work_reg_a_sp = vzip2q_s8(input_data_a, input_data_b);
work_reg_b_sp = vzip2q_s8(input_data_c, input_data_d);
vzipq_s8x2_in_place(&work_reg_a_sp, &work_reg_b_sp);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a_sp = veorq_s8(work_reg_a_sp, sign_bit);
work_reg_b_sp = veorq_s8(work_reg_b_sp, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a_sp);
vst1q_s8(scratch_data_0 + 16, work_reg_b_sp);
scratch_data_0 += depth_advance;
}
for (; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vld1q_lane_s8x8(input_data_0 + 1 * input_depth,
input_data_b, 0);
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
input_data_d = vld1q_lane_s8x8(input_data_0 + 3 * input_depth,
input_data_d, 0);
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
input_data_0 += 8;
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
} else {
TFLITE_DCHECK_LT(adjusted_residual_width, 4);
for (int i_depth = 0; i_depth < depth_micro_repeats; ++i_depth) {
input_data_a = vdupq_n_u8(-input_offset);
input_data_b = vdupq_n_u8(-input_offset);
input_data_c = vdupq_n_u8(-input_offset);
input_data_d = vdupq_n_u8(-input_offset);
// Skip loading first column.
if (adjusted_residual_width > 1) {
input_data_b = vld1q_lane_s8x8(input_data_0 + input_depth,
input_data_b, 0);
if (adjusted_residual_width == 3) {
input_data_c = vld1q_lane_s8x8(input_data_0 + 2 * input_depth,
input_data_c, 0);
}
}
work_reg_a = vzip1q_s8(input_data_a, input_data_b);
work_reg_b = vzip1q_s8(input_data_c, input_data_d);
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
work_reg_a = veorq_s8(work_reg_a, sign_bit);
work_reg_b = veorq_s8(work_reg_b, sign_bit);
}
vzipq_s8x2_in_place(&work_reg_a, &work_reg_b);
vst1q_s8(scratch_data_0, work_reg_a);
vst1q_s8(scratch_data_0 + 16, work_reg_b);
scratch_data_0 += depth_advance;
input_data_0 += 8;
}
scratch_data_0 += width_advance;
input_data_0 += input_depth_skip;
}
}
}
scratch_data_0 += height_advance;
input_block_data += input_height_stride;
}
if (trailing_height_padding) {
memset(scratch_data_0, -input_offset_difference, workspace_height_stride);
scratch_data_0 += workspace_height_stride;
}
TFLITE_DCHECK_EQ(
scratch_data_0,
scratch_block_data + block_height * workspace_height_stride);
}
  // Entry point matching the PackMacroBlock::Run interface. On AArch64 it
  // first calls PreloadInputBlock on the input (cache warm-up helper — see
  // its definition elsewhere in this file), then delegates to the
  // intrinsics-based packing routine above.
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
#ifdef __aarch64__
    PreloadInputBlock(input_block_data, function_params);
#endif
    PackMacroBlockIntrinsics(height_block_number, width_block_number,
                             input_block_data, scratch_block_data,
                             function_params);
  }
};
// Packs one macro block of unit-input-depth data into the scratch workspace
// for the NEON-intrinsics 3x3 dot-product path, for the case where up to one
// unit of padding may be needed on each edge. Padding positions are filled
// with -input_offset (the encoding of a zero input before sign-bit
// adjustment), and the uint8 path is shifted into the int8 domain by XOR-ing
// the sign bit.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kUnitInputDepth,
                      /*max_padding=*/1> {
  static inline void PackMacroBlockIntrinsics(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    const int padding_left = function_params->padding_left;
    const int padding_right = function_params->padding_right;
    const int padding_top = function_params->padding_top;
    const int padding_bottom = function_params->padding_bottom;
    constexpr int kSymmetricZeroPoint =
        QuantizationTypeImpl<quantization_type>::kIntSymmetricZeroPoint;
    TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
    // Padding applies only on the outermost macro blocks of the input.
    const bool leading_width_padding =
        padding_left > 0 && width_block_number == 0;
    const bool trailing_width_padding =
        padding_right > 0 &&
        width_block_number == (function_params->width_macro_count - 1);
    const bool leading_height_padding =
        padding_top > 0 && height_block_number < 0;
    const bool trailing_height_padding =
        padding_bottom > 0 &&
        height_block_number == (function_params->height_macro_count - 1);
    const int32 input_offset = function_params->input_offset;
    // Fill value for padded rows: represents a zero input in the adjusted
    // (symmetric zero-point) domain.
    const int32 input_offset_difference = input_offset + kSymmetricZeroPoint;
    // Work through one slice, by row, at a time.
    int8* scratch_data_base = scratch_block_data;
    int copy_block_height = block_height;
    if (leading_height_padding) {
      // Whole first row is padding: memset it and advance past it.
      copy_block_height -= 1;
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
      input_block_data += input_height_stride;
    }
    if (trailing_height_padding) {
      copy_block_height -= 1;
    }
    int adjusted_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    if (trailing_width_padding) {
      adjusted_residual_width -= 1;
    }
    int start_width = 0;
    if (leading_width_padding) {
      // First output byte of each row is padding; real input starts one
      // byte later.
      start_width = 1;
      input_block_data += 1;
    }
    // Number of real (non-padding) input bytes copied per row.
    const int copy_size = (width_overall_micro_repeats - 1) * 4 +
                          adjusted_residual_width - start_width;
    // Adjusted so that later conditionals are simplified.
    const int copy_size_adjusted =
        trailing_width_padding ? copy_size + 1 : copy_size;
    TFLITE_DCHECK_LE(
        copy_size,
        input_height_stride - width_block_number * input_width_micro_repeats);
    // We may drop up to stride-1 of trailing input.
    TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
    int scratch_data_offset = 0;
    int input_block_offset = 0;
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
    // code. Note the blocks of 4x4 are still interleaved down the depth.
    int8x16_t work_reg;
    int8x8_t half_work_reg;
    int8x8_t padding_mask;
    // Effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    const uint8x16_t padding_reg = vdupq_n_u8(-input_offset);
    padding_mask = vdup_n_s8(-1);
    half_work_reg = vdup_n_s8(0);
    if (copy_size >= 16) {
      // Wide path: rows copied mostly in 16-byte chunks.
      const int copy_remaining = (copy_size + start_width) & 0x7;
      // Shift all-ones mask left so that only the trailing
      // (8 - copy_remaining) lanes remain set; those lanes get padding fill.
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          work_reg = util_vld1q_x8(input_block_data + input_block_offset);
          // Prepend one padding byte ahead of the 15 loaded input bytes.
          work_reg = vextq_s8(padding_reg, work_reg, 15);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg = veorq_s8(work_reg, sign_bit);
          }
          vst1q_s8(scratch_data, work_reg);
          copy_done += 15;  // Only 15 of the 16 stored bytes were input.
        }
        // Main copy loop.
        for (; (copy_done + 16) <= copy_size; copy_done += 16) {
          work_reg =
              util_vld1q_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg = veorq_s8(work_reg, sign_bit);
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 16, 0);
          vst1q_s8(scratch_data + start_width + copy_done, work_reg);
        }
        if (copy_done + 8 <= copy_size) {
          // One half-register (8-byte) step.
          half_work_reg =
              util_vld1_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 8, 0);
          vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
          copy_done += 8;
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = util_vld1_x8(input_block_data + input_block_offset +
                                       copy_size - 8);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (8 - copy_remaining))));
          // Replace lanes beyond the real data with the padding value.
          half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                  vget_low_s8(padding_reg), half_work_reg);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 8, 0);
          vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
        }
        // Trailing guard.
        vst1_s8(scratch_data + start_width + copy_done, half_work_reg);
        vst1_s8(scratch_data + start_width + copy_done + 8, half_work_reg);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (copy_size >= 4) {
      // Medium path: rows copied in 4-byte lane chunks.
      const int copy_remaining = (copy_size + start_width) & 0x3;
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // The surrounding condition ensures that we always need at least one
        // iteration of the main copy loop. In the case of leading width
        // padding, we unroll this specially.
        if (leading_width_padding) {
          half_work_reg = vld1_lane_8x4(input_block_data + input_block_offset,
                                        half_work_reg, 0);
          // Prepend one padding byte ahead of the 3 loaded input bytes.
          half_work_reg = vext_s8(vget_low_s8(padding_reg), half_work_reg, 7);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          vst1_lane_8x4(scratch_data, half_work_reg, 0);
          copy_done += 3;  // Only 3 of the 4 stored bytes were input.
        }
        // Main copy loop.
        for (; (copy_done + 4) <= copy_size; copy_done += 4) {
          half_work_reg =
              vld1_lane_8x4(input_block_data + input_block_offset + copy_done,
                            half_work_reg, 0);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 4, 0);
          vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg,
                        0);
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size_adjusted) {
          TFLITE_DCHECK_LT(copy_remaining, 4);
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = vld1_lane_8x4(
              input_block_data + input_block_offset + copy_size - 4,
              half_work_reg, 0);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (4 - copy_remaining))));
          // Replace lanes beyond the real data with the padding value.
          half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                  vget_low_s8(padding_reg), half_work_reg);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ((start_width + copy_done) % 4, 0);
          vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg,
                        0);
          copy_done += 4;
        }
        // Trailing guard.
        vst1_lane_8x4(scratch_data + start_width + copy_done, half_work_reg, 0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 4, half_work_reg,
                      0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 8, half_work_reg,
                      0);
        vst1_lane_8x4(scratch_data + start_width + copy_done + 12,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (width_overall_micro_repeats == 2) {
      // Special case of 1 + 3 + 1, padding + copy + padding.
      // This is rarely executed in practice.
      TFLITE_DCHECK_EQ(copy_size, 3);
      TFLITE_DCHECK_EQ(start_width, 1);
      TFLITE_DCHECK(leading_width_padding);
      TFLITE_DCHECK(trailing_width_padding);
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Lane 0 (and the upper lanes) keep the padding value; the three
        // real input bytes go into lanes 1..3.
        half_work_reg = vdup_n_u8(-input_offset);
        half_work_reg = vld1_lane_s8(reinterpret_cast<const int8*>(
                                         input_block_data + input_block_offset),
                                     half_work_reg, 1);
        half_work_reg =
            vld1_lane_s8(reinterpret_cast<const int8*>(input_block_data +
                                                       input_block_offset + 1),
                         half_work_reg, 2);
        half_work_reg =
            vld1_lane_s8(reinterpret_cast<const int8*>(input_block_data +
                                                       input_block_offset + 2),
                         half_work_reg, 3);
        if (quantization_type == QuantizationType::kNonPerChannelUint8) {
          half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
        }
        TFLITE_DCHECK_EQ(scratch_data_offset % 8, 0);
        vst1_s8(scratch_data_base + scratch_data_offset, half_work_reg);
        // Trailing guard.
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else {
      // Narrow path: fewer than 4 bytes per row, gathered one byte at a time.
      TFLITE_DCHECK_EQ(width_overall_micro_repeats, 1);
      const int copy_remaining = (copy_size + start_width) & 0x3;
      padding_mask = vreinterpret_s8_s64(vshl_s64(
          vreinterpret_s64_s8(padding_mask), vdup_n_s64(8 * copy_remaining)));
      if (leading_width_padding) {
        // Lane 0 will hold the leading padding byte.
        padding_mask = vset_lane_u8(255, padding_mask, 0);
      }
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Load the row back-to-front, shifting previously loaded bytes up
        // one lane each iteration.
        for (int i = 0; i < copy_size; ++i) {
          half_work_reg = vreinterpret_s8_s64(
              vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
          half_work_reg = vld1_lane_s8(
              reinterpret_cast<const int8*>(
                  input_block_data + input_block_offset + copy_size - 1 - i),
              half_work_reg, 0);
        }
        if (leading_width_padding) {
          // Make room in lane 0 for the leading padding byte.
          half_work_reg = vreinterpret_s8_s64(
              vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
        }
        half_work_reg = vbsl_s8(vreinterpret_u8_s8(padding_mask),
                                vget_low_s8(padding_reg), half_work_reg);
        if (quantization_type == QuantizationType::kNonPerChannelUint8) {
          half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
        }
        TFLITE_DCHECK_EQ(scratch_data_offset % 4, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset, half_work_reg,
                      0);
        // Trailing guard.
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    }
    scratch_data_base += copy_block_height * workspace_height_stride;
    if (trailing_height_padding) {
      // Whole last row is padding.
      memset(scratch_data_base, -input_offset_difference,
             workspace_height_stride + kWorkspaceExtension);
      scratch_data_base += workspace_height_stride;
    }
    TFLITE_DCHECK_EQ(
        scratch_data_base,
        scratch_block_data + block_height * workspace_height_stride);
  }
  // Entry point: on AArch64, preload the input block, then pack it.
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
#ifdef __aarch64__
    PreloadInputBlock(input_block_data, function_params);
#endif
    PackMacroBlockIntrinsics(height_block_number, width_block_number,
                             input_block_data, scratch_block_data,
                             function_params);
  }
};
// Packs one macro block of unit-input-depth data into the scratch workspace
// for the NEON-intrinsics 3x3 dot-product path. This specialization handles
// the no-padding case: all four paddings are DCHECK-ed to be zero, so no
// padding fill or masking is needed.
template <QuantizationType quantization_type>
struct PackMacroBlock<DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
                      quantization_type,
                      DepthwiseConvDepthMultiplication::kUnitInputDepth,
                      /*max_padding=*/0> {
  static inline void PackMacroBlockIntrinsics(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int input_width_micro_repeats =
        function_params->input_width_micro_repeats;
    const int block_height = function_params->inbound_block_height;
    const int residual_width = function_params->residual_width;
    const int input_height_stride = function_params->input_height_stride;
    TFLITE_DCHECK_EQ(function_params->padding_left, 0);
    TFLITE_DCHECK_EQ(function_params->padding_right, 0);
    TFLITE_DCHECK_EQ(function_params->padding_top, 0);
    TFLITE_DCHECK_EQ(function_params->padding_bottom, 0);
    TFLITE_DCHECK_GE(workspace_height_stride, 4 * width_overall_micro_repeats);
    // Work through one slice, by row, at a time.
    int8* scratch_data_base = scratch_block_data;
    const int copy_block_height = block_height;
    int adjusted_residual_width =
        input_width_micro_repeats < width_overall_micro_repeats ? residual_width
                                                                : 4;
    // Number of input bytes copied per row.
    const int copy_size =
        (width_overall_micro_repeats - 1) * 4 + adjusted_residual_width;
    TFLITE_DCHECK_LE(
        copy_size,
        input_height_stride - width_block_number * input_width_micro_repeats);
    // We may drop up to stride-1 of trailing input.
    TFLITE_DCHECK_GE(copy_size, input_height_stride - 1);
    int scratch_data_offset = 0;
    int input_block_offset = 0;
    constexpr uint8 kSignBit =
        QuantizationTypeImpl<quantization_type>::kUint8SignBit;
    // Transpositions are 4x4, but doing 2 at a time is more efficient in NEON
    // code. Note the blocks of 4x4 are still interleaved down the depth.
    int8x16_t work_reg;
    int8x8_t half_work_reg;
    // Effect subtraction of zero-point = 128 by XOR of sign bit.
    const uint8x16_t sign_bit = vdupq_n_u8(kSignBit);
    half_work_reg = vdup_n_s8(0);
    if (copy_size >= 16) {
      // Wide path: rows copied mostly in 16-byte chunks.
      const int copy_remaining = copy_size & 0x7;
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // Main copy loop.
        for (; (copy_done + 16) <= copy_size; copy_done += 16) {
          work_reg =
              util_vld1q_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            work_reg = veorq_s8(work_reg, sign_bit);
          }
          TFLITE_DCHECK_EQ(copy_done % 16, 0);
          vst1q_s8(scratch_data + copy_done, work_reg);
        }
        if (copy_done + 8 <= copy_size) {
          // One half-register (8-byte) step.
          half_work_reg =
              util_vld1_x8(input_block_data + input_block_offset + copy_done);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ(copy_done % 8, 0);
          vst1_s8(scratch_data + copy_done, half_work_reg);
          copy_done += 8;
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size) {
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = util_vld1_x8(input_block_data + input_block_offset +
                                       copy_size - 8);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (8 - copy_remaining))));
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ(copy_done % 8, 0);
          vst1_s8(scratch_data + copy_done, half_work_reg);
          copy_done += 8;
        }
        // Trailing guard.
        vst1_s8(scratch_data + copy_done, half_work_reg);
        vst1_s8(scratch_data + copy_done + 8, half_work_reg);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else if (copy_size >= 4) {
      // Medium path: rows copied in 4-byte lane chunks.
      const int copy_remaining = copy_size & 0x3;
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Work through one slice, by row, at a time.
        int8* scratch_data = scratch_data_base + scratch_data_offset;
        int copy_done = 0;
        // Main copy loop.
        for (; (copy_done + 4) <= copy_size; copy_done += 4) {
          half_work_reg =
              vld1_lane_8x4(input_block_data + input_block_offset + copy_done,
                            half_work_reg, 0);
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ(copy_done % 4, 0);
          vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
        }
        TFLITE_DCHECK_EQ(copy_remaining, copy_size - copy_done);
        // Total amount
        // = copy_size - copy_done + 4 - adjusted_residual_width
        // = width_overall_micro_repeats * 4 - start_width - copy_done.
        // Undone micro blocks
        // = width_overall_micro_repeats - (start_width + copy_done) / 4.
        // Conditional is (copy_remaining > 0 || trailing_width_padding).
        if (copy_done < copy_size) {
          TFLITE_DCHECK_LT(copy_remaining, 4);
          // Employ overlapping-load strategy in order to load full register,
          // but use only part.
          // This has the advantage of resulting in zeros after shifting.
          half_work_reg = vld1_lane_8x4(
              input_block_data + input_block_offset + copy_size - 4,
              half_work_reg, 0);
          half_work_reg = vreinterpret_s8_s64(
              vshl_s64(vreinterpret_s64_s8(half_work_reg),
                       vdup_n_s64(-8 * (4 - copy_remaining))));
          if (quantization_type == QuantizationType::kNonPerChannelUint8) {
            half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
          }
          TFLITE_DCHECK_EQ(copy_done % 4, 0);
          vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
          copy_done += 4;
        }
        // Trailing guard.
        vst1_lane_8x4(scratch_data + copy_done, half_work_reg, 0);
        vst1_lane_8x4(scratch_data + copy_done + 4, half_work_reg, 0);
        vst1_lane_8x4(scratch_data + copy_done + 8, half_work_reg, 0);
        vst1_lane_8x4(scratch_data + copy_done + 12, half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    } else {
      // Narrow path: fewer than 4 bytes per row, gathered one byte at a time.
      TFLITE_DCHECK_EQ(width_overall_micro_repeats, 1);
      for (int k_height = 0; k_height < copy_block_height; ++k_height) {
        // Load the row back-to-front, shifting previously loaded bytes up
        // one lane each iteration.
        for (int i = 0; i < copy_size; ++i) {
          half_work_reg = vreinterpret_s8_s64(
              vshl_n_s64(vreinterpret_s64_s8(half_work_reg), 8));
          half_work_reg = vld1_lane_s8(
              reinterpret_cast<const int8*>(
                  input_block_data + input_block_offset + copy_size - 1 - i),
              half_work_reg, 0);
        }
        // NOTE(review): unlike the other branches, this XOR is not guarded by
        // the quantization-type check; presumably kSignBit is 0 for the
        // per-channel int8 path, which would make it a no-op there — confirm
        // against QuantizationTypeImpl.
        half_work_reg = veor_s8(half_work_reg, vget_low_s8(sign_bit));
        TFLITE_DCHECK_EQ(scratch_data_offset % 4, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset, half_work_reg,
                      0);
        // Trailing guard.
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 4,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 8,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 12,
                      half_work_reg, 0);
        vst1_lane_8x4(scratch_data_base + scratch_data_offset + 16,
                      half_work_reg, 0);
        scratch_data_offset += workspace_height_stride;
        input_block_offset += input_height_stride;
      }
    }
    scratch_data_base += copy_block_height * workspace_height_stride;
    TFLITE_DCHECK_EQ(
        scratch_data_base,
        scratch_block_data + block_height * workspace_height_stride);
  }
  // Entry point: on AArch64, preload the input block, then pack it.
  static inline void Run(
      int32 height_block_number, int32 width_block_number,
      const typename QuantizationTypeImpl<quantization_type>::ExternalType*
          input_block_data,
      int8* scratch_block_data,
      const DepthwiseConvDotProdParams* function_params) {
#ifdef __aarch64__
    PreloadInputBlock(input_block_data, function_params);
#endif
    PackMacroBlockIntrinsics(height_block_number, width_block_number,
                             input_block_data, scratch_block_data,
                             function_params);
  }
};
#endif // ARM NEON
// Apply filter to macro block of input data and store results.
//
// Requirement: depth_micro_repeats > 0 || residual_depth > 0.
//
// This is the C-model ("kUseCModel3x3DotProduct") variant: plain C++ that
// simulates the NEON register layout with small arrays.
template <int32 stride, QuantizationType quantization_type>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseCModel3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kNoMultiplication, stride> {
  // Construct a width-shifted combination of two input sub-blocks, effectively
  // concatenating them.
  //
  // The filter is applied using sub-blocks. These are in the needed form for
  // the first (width) offset. For subsequent offsets, the filter is applied to
  // shifted and combined data. The concatenation and shifting herein is fairly
  // straightforward, but in the optimized code is an area of creativity in
  // design because NEON instructions do not directly support the required
  // between-register permutation.
  //
  // In NEON optimized code, input data is grouped in 4-byte blocks. In order to
  // move along the width for each output point calculation, data is shifted, in
  // essence between two such blocks.
  //
  // selected_data has format height 3, depth 4, width 4.
  //
  // When the micro block is trailing (the last across the macro-block width),
  // it would be illegal to load the right (next) block, and the no_right_block
  // indicates this scenario.
  static inline void ConcatenateInputSubBlocks(int offset, int sub_block,
                                               int workspace_height_stride,
                                               int width_micro_stride,
                                               bool no_right_block,
                                               const int8* input_block,
                                               int8 selected_data[3][4][4]) {
    TFLITE_DCHECK_GE(offset, 0);
    TFLITE_DCHECK_LT(offset, 4);
    // The input banks have same format as selected_data.
    int8 left_bank[3][4][4];
    int8 right_bank[3][4][4];
    // Work through one slice, by row, at a time.
    for (int k_height = 0; k_height < 3; ++k_height) {
      // Simulate demangling of mangled storage arrangement.
      const int8* left_input_block =
          &input_block[k_height * workspace_height_stride + sub_block * 2 * 8];
      memcpy(left_bank[k_height][0], left_input_block, 16);
      if (no_right_block) {
        // The next micro block may not be read; substitute zeros.
        memset(right_bank[k_height][0], 0, 16);
      } else {
        const int8* right_input_block =
            &input_block[k_height * workspace_height_stride +
                         sub_block * 2 * 8 + width_micro_stride];
        memcpy(right_bank[k_height][0], right_input_block, 16);
      }
      // Splice (4 - offset) bytes from the left bank with offset bytes from
      // the right bank for each depth channel.
      for (int depth_index = 0; depth_index < 4; ++depth_index) {
        memcpy(selected_data[k_height][depth_index],
               &left_bank[k_height][depth_index][offset], 4 - offset);
        memcpy(&selected_data[k_height][depth_index][4 - offset],
               right_bank[k_height][depth_index], offset);
      }
    }
  }

  // Straight implementation of 3x3 filter within sub-micro block.
  // Accumulates filter x input over the 3x4 window per depth channel, adds
  // bias, requantizes with upward rounding, applies the output offset, and
  // clamps to the activation range.
  static inline void Calculate3x3FilterOutput(
      const DepthwiseConvDotProdParams& params, int sub_block,
      const int8 selected_data[3][4][4], const int8 filter_bank[3][2][4][4],
      const int32* bias_data, uint8 output_values[4]) {
    const int32 output_activation_min = params.quantized_activation_min;
    const int32 output_activation_max = params.quantized_activation_max;
    const int32 output_multiplier = params.output_multiplier;
    const int32 output_shift = params.output_shift;
    const int32 output_offset = params.output_offset;
    for (int d = 0; d < 4; ++d) {
      int32 acc = 0;
      for (int y = 0; y < 3; ++y) {
        for (int x = 0; x < 4; ++x) {
          int32 input_val = selected_data[y][d][x];
          int32 filter_val = filter_bank[y][sub_block][d][x];
          acc += filter_val * input_val;
        }
      }
      acc += bias_data[d];
      acc = reference_ops::depthwise_conv::DepthwiseConvRound<
          DepthwiseConvOutputRounding::kUpward>(acc, output_multiplier,
                                                output_shift);
      acc += output_offset;
      acc = std::max(acc, output_activation_min);
      acc = std::min(acc, output_activation_max);
      output_values[d] = static_cast<uint8>(acc);
    }
  }

  // Applies the 3x3 filter across a packed macro block of input and writes
  // quantized outputs.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int input_width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->input_depth;
    const int stride_val = function_params->stride;
    const int four_over_stride = function_params->four_over_stride;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    constexpr int bias_increment = 4;
    TFLITE_DCHECK_EQ(function_params->bias_increment, bias_increment);
    TFLITE_DCHECK(depth_micro_repeats > 0);
    const int width_micro_stride = 4 * 8;
    const int depth_micro_stride =
        width_micro_stride * input_width_overall_micro_repeats;
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    // Simulate NEON-register transposition of subset of filter.
    int8 filter_bank[3][2][4][4];  // Height 3, sub-block, depth 4, width 4.
    // Simulate NEON-register input data concatenation + sub-selection.
    int8 sub_selected_input_data[3][4][4];  // Height 3, depth 4, width 4.
    uint8 output_values[4];                 // Depth 4.
    // The outer 3 loops go through all the micro blocks in a macro block, and
    // separately treat the two sub-blocks within each micro block.
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      memcpy(filter_bank[0][0][0],
             filter_workspace + j_depth * shuffled_filter_increment,
             shuffled_filter_increment);
      for (int s = 0; s < 2; ++s) {
        for (int k_height = 0; k_height < block_height; ++k_height) {
          const int8* scratch_data =
              scratch_block_data +
              workspace_height_stride * k_height * stride_val +
              depth_micro_stride * j_depth;
          uint8* output_data =
              output_block_data + output_height_stride * k_height + 8 * j_depth;
          for (int i_width = 0; i_width < output_width_overall_micro_repeats;
               ++i_width) {
            // Last micro block may produce fewer than a full set of outputs.
            const int output_width = i_width == output_width_micro_repeats
                                         ? residual_width
                                         : four_over_stride;
            const bool no_right_block = (output_width - 1) * stride_val < 2;
            TFLITE_DCHECK_LE(output_width * stride_val, 4);
            const int8* input_data =
                scratch_data + width_micro_stride * i_width;
            // Iterate over input width shifts within sub-micro blocks.
            for (int x = 0; x < output_width; ++x) {
              ConcatenateInputSubBlocks(x * stride_val, s,
                                        workspace_height_stride,
                                        width_micro_stride, no_right_block,
                                        input_data, sub_selected_input_data);
              Calculate3x3FilterOutput(
                  *function_params, s, sub_selected_input_data, filter_bank,
                  bias_data + (2 * j_depth + s) * bias_increment,
                  output_values);
              for (int d = 0; d < 4; ++d) {
                output_data[depth * (four_over_stride * i_width + x) + 4 * s +
                            d] = output_values[d];
              }
            }
          }
        }
      }
    }
  }
};
// Apply filter to macro block of input data and store results.
//
// Parameters for repeats and residual sizes are in terms of outputs.
//
// Requirement: depth_micro_repeats > 0 || residual_depth > 0.
template <int32 stride, QuantizationType quantization_type>
struct KernelMacroBlock<
DepthwiseConvImplementation::kUseCModel3x3DotProduct, quantization_type,
DepthwiseConvDepthMultiplication::kUnitInputDepth, stride> {
// Construct a width-shifted combination of two input sub-blocks, effectively
// concatenating them.
//
// The filter is applied using sub-blocks. These are in the needed form for
// the first (width) offset. For subsequent offsets, the filter is applied to
  // shifted and combined data. The concatenation and shifting herein is fairly
// straightforward, but in the optimized code is an area of creativity in
// design because NEON instructions do not directly support the required
// between-register permutation.
//
// In NEON optimized code, input data is grouped in 4-byte blocks. In order to
// move along the width for each output point calculation, data is shifted, in
// essence between two such blocks.
//
// selected_data has format height 3, width 4.
//
// When the micro block is trailing (the last across the macro-block width),
// it would be illegal to load the right (next) block, and the no_right_block
// indicates this scenario.
static inline void ConcatenateInputSubBlocks(int offset,
int workspace_height_stride,
bool no_right_block,
const int8* input_block,
int8 selected_data[3][4]) {
TFLITE_DCHECK_GE(offset, 0);
TFLITE_DCHECK_LT(offset, 4);
if (no_right_block) {
for (int k_height = 0; k_height < 3; ++k_height) {
memcpy(selected_data[k_height],
&input_block[k_height * workspace_height_stride + offset],
4 - offset);
}
} else {
for (int k_height = 0; k_height < 3; ++k_height) {
memcpy(selected_data[k_height],
&input_block[k_height * workspace_height_stride + offset], 4);
}
}
}
// Straight implementation of 3x3 filter within sub-micro block.
static inline void Calculate3x3FilterOutput(
const DepthwiseConvDotProdParams& function_params, int sub_block,
const int8 selected_data[3][4], const int8 filter_bank[3][2][4][4],
const int32* bias_data, uint8 output_values[4]) {
const int32 output_activation_min =
function_params.quantized_activation_min;
const int32 output_activation_max =
function_params.quantized_activation_max;
const int32 output_multiplier = function_params.output_multiplier;
const int32 output_shift = function_params.output_shift;
const int32 output_offset = function_params.output_offset;
for (int d = 0; d < 4; ++d) {
int32 acc = 0;
for (int y = 0; y < 3; ++y) {
for (int x = 0; x < 4; ++x) {
int32 input_val = selected_data[y][x];
int32 filter_val = filter_bank[y][sub_block][d][x];
acc += filter_val * input_val;
}
}
acc += bias_data[d];
acc = reference_ops::depthwise_conv::DepthwiseConvRound<
DepthwiseConvOutputRounding::kUpward>(acc, output_multiplier,
output_shift);
acc += output_offset;
acc = std::max(acc, output_activation_min);
acc = std::min(acc, output_activation_max);
output_values[d] = static_cast<uint8>(acc);
}
}
  // Scalar C++ reference for one macro block of the 3x3 depthwise-conv
  // dot-product kernel (no depth multiplication).  Consumes pre-shuffled
  // input from `scratch_block_data` and pre-shuffled filters from
  // `filter_workspace`, writing quantized uint8 results to
  // `output_block_data` via the two helpers above.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->output_depth;
    const int stride_val = function_params->stride;
    const int four_over_stride = function_params->four_over_stride;
    const int workspace_width_micro_repeats =
        function_params->workspace_width_micro_repeats;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    constexpr int bias_increment = 4;
    TFLITE_DCHECK_EQ(function_params->bias_increment, bias_increment);
    TFLITE_DCHECK(depth_micro_repeats > 0);
    // One shuffled-filter micro block: 2 sub-blocks x 3 rows x depth 4 x
    // width 4 int8 values.
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    // Simulate NEON-register transposition of subset of filter.
    int8 filter_bank[3][2][4][4];  // Height 3, sub-block, depth 4, width 4.
    // Simulate NEON-register input data concatenation + sub-selection.
    int8 sub_selected_input_data[3][4];  // Height 3, width 4.
    uint8 output_values[4];              // Depth 4.
    // The outer 3 loops go through all the micro blocks in a macro block, and
    // separately treat the two sub-blocks within each micro block.
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Load the shuffled filter slice for this depth-8 micro block.
      memcpy(filter_bank[0][0][0],
             filter_workspace + j_depth * shuffled_filter_increment,
             shuffled_filter_increment);
      for (int s = 0; s < 2; ++s) {
        for (int k_height = 0; k_height < block_height; ++k_height) {
          const int8* scratch_data =
              scratch_block_data +
              workspace_height_stride * k_height * stride_val;
          uint8* output_data =
              output_block_data + output_height_stride * k_height + 8 * j_depth;
          for (int i_width = 0; i_width < output_width_overall_micro_repeats;
               ++i_width) {
            // The last micro block may produce fewer outputs than a full one.
            const int output_width = i_width == output_width_micro_repeats
                                         ? residual_width
                                         : four_over_stride;
            // No valid data exists to the right when the overall output
            // repeats exactly cover the workspace width.
            const bool no_right_block = i_width == output_width_micro_repeats &&
                                        output_width_overall_micro_repeats ==
                                            workspace_width_micro_repeats;
            TFLITE_DCHECK_LE(output_width * stride_val, 4);
            const int8* input_data = scratch_data + 4 * i_width;
            // Iterate over input width shifts within 4x4 blocks.
            for (int x = 0; x < output_width; ++x) {
              ConcatenateInputSubBlocks(x * stride_val, workspace_height_stride,
                                        no_right_block, input_data,
                                        sub_selected_input_data);
              Calculate3x3FilterOutput(
                  *function_params, s, sub_selected_input_data, filter_bank,
                  bias_data + (2 * j_depth + s) * bias_increment,
                  output_values);
              // Interleave the 4 computed channels into the depth-8 output
              // slot: sub-block s occupies channels [4*s, 4*s + 4).
              for (int d = 0; d < 4; ++d) {
                output_data[depth * (four_over_stride * i_width + x) + 4 * s +
                            d] = output_values[d];
              }
            }
          }
        }
      }
    }
  }
};
// Beginning of code section containing intermediate code transformation.
//
// This section is only compiled when kUseUnwound3x3DotProduct versions of
// templated functions are selected.
// Intermediate ("unwound") C++ emulation of the NEON 3x3 dot-product kernel
// without depth multiplication.  Left/right int8 "banks" stand in for NEON
// registers; the explicit element shifts below mimic register-shift
// instructions.  Only compiled when kUseUnwound3x3DotProduct is selected.
template <int32 stride, QuantizationType quantization_type>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseUnwound3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kNoMultiplication, stride> {
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int input_width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->input_depth;
    const int stride_val = function_params->stride;
    const int four_over_stride = function_params->four_over_stride;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    const int bias_increment = function_params->bias_increment;
    TFLITE_DCHECK(depth_micro_repeats > 0);
    // Input workspace strides: 4 widths x 8 depth per width micro block.
    const int width_micro_stride = 4 * 8;
    const int depth_micro_stride =
        width_micro_stride * input_width_overall_micro_repeats;
    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_multiplier = function_params->output_multiplier;
    const int32 output_shift = function_params->output_shift;
    const int32 output_offset = function_params->output_offset;
    // Simulate NEON-register transposition of subset of filter.
    // The "a" banks hold the filter for the active sub-block; "b" banks hold
    // the second sub-block and are copied into "a" after the s == 0 pass.
    int8 filter_bank_a_0[4][4];  // Depth 4, width 4.
    int8 filter_bank_a_1[4][4];
    int8 filter_bank_a_2[4][4];
    int8 filter_bank_b_0[4][4];
    int8 filter_bank_b_1[4][4];
    int8 filter_bank_b_2[4][4];
    // Simulate NEON-register input data concatenation + sub-selection.
    // Also sub-block, height 3, depth 4, width 4.
    uint8 output_values[4];  // Sub-block, depth 4.
    // selected_data has format Depth 4, width 4.
    int8 left_bank_0[4][4];
    int8 left_bank_1[4][4];
    int8 left_bank_2[4][4];
    int8 right_bank_0[4][4];
    int8 right_bank_1[4][4];
    int8 right_bank_2[4][4];
    // Right banks start zeroed: they are only (re)loaded when a right-hand
    // block exists, but their elements are shifted in unconditionally.
    memset(right_bank_0[0], 0, 16);
    memset(right_bank_1[0], 0, 16);
    memset(right_bank_2[0], 0, 16);
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Load the 3 (height) x 2 (sub-block) shuffled filter slices.
      const int8* filter_block =
          filter_workspace + shuffled_filter_increment * j_depth;
      memcpy(filter_bank_a_0, filter_block, 16);
      memcpy(filter_bank_b_0, filter_block + 16, 16);
      memcpy(filter_bank_a_1, filter_block + 32, 16);
      memcpy(filter_bank_b_1, filter_block + 48, 16);
      memcpy(filter_bank_a_2, filter_block + 64, 16);
      memcpy(filter_bank_b_2, filter_block + 80, 16);
      for (int s = 0; s < 2; ++s) {
        // Work through one slice, by row, at a time.
        for (int k_height = 0; k_height < block_height; ++k_height) {
          const int8* scratch_data =
              scratch_block_data +
              workspace_height_stride * k_height * stride_val +
              depth_micro_stride * j_depth;
          uint8* output_data =
              output_block_data + output_height_stride * k_height + 8 * j_depth;
          const int8* input_data_0 = scratch_data + s * 2 * 8;
          // Load first sub-micro block of data into operational banks.
          memcpy(left_bank_0[0], input_data_0, 16);
          memcpy(left_bank_1[0], input_data_0 + workspace_height_stride, 16);
          memcpy(left_bank_2[0], input_data_0 + 2 * workspace_height_stride,
                 16);
          for (int i_width = 0; i_width < output_width_overall_micro_repeats;
               ++i_width) {
            // The last micro block may produce fewer outputs than a full one.
            const int output_width = i_width == output_width_micro_repeats
                                         ? residual_width
                                         : four_over_stride;
            TFLITE_DCHECK_LE(output_width * stride_val, 4);
            const int8* input_data =
                input_data_0 + width_micro_stride * i_width;
            const bool no_right_block = (output_width - 1) * stride_val < 2;
            // Load next sub-micro block of data.
            if (!no_right_block) {
              memcpy(right_bank_0[0], input_data + width_micro_stride, 16);
              memcpy(right_bank_1[0],
                     input_data + workspace_height_stride + width_micro_stride,
                     16);
              memcpy(
                  right_bank_2[0],
                  input_data + 2 * workspace_height_stride + width_micro_stride,
                  16);
            }
            // Iterate over input width shifts within 4x4 blocks.
            for (int x = 0; x < output_width; ++x) {
              // Operate on depth of 4 in batches.
              for (int d = 0; d < 4; ++d) {
                int32 acc = 0;
                // Accumulate the three filter rows against the left banks.
                // (The inner `x` shadows the outer `x`; it iterates the
                // filter width.)
                for (int x = 0; x < 4; ++x) {
                  int32 input_val = left_bank_0[d][x];
                  int32 filter_val = filter_bank_a_0[d][x];
                  acc += filter_val * input_val;
                }
                for (int x = 0; x < 4; ++x) {
                  int32 input_val = left_bank_1[d][x];
                  int32 filter_val = filter_bank_a_1[d][x];
                  acc += filter_val * input_val;
                }
                for (int x = 0; x < 4; ++x) {
                  int32 input_val = left_bank_2[d][x];
                  int32 filter_val = filter_bank_a_2[d][x];
                  acc += filter_val * input_val;
                }
                // Bias, then requantize: rounding multiply/shift, offset,
                // clamp to the activation range.
                acc += bias_data[d];
                acc = reference_ops::depthwise_conv::DepthwiseConvRound<
                    DepthwiseConvOutputRounding::kUpward>(
                    acc, output_multiplier, output_shift);
                acc += output_offset;
                acc = std::max(acc, output_activation_min);
                acc = std::min(acc, output_activation_max);
                output_values[d] = static_cast<uint8>(acc);
              }
              // Sub-block s occupies channels [4*s, 4*s + 4) of the depth-8
              // output slot.
              for (int d = 0; d < 4; ++d) {
                output_data[depth * (four_over_stride * i_width + x) + 4 * s +
                            d] = output_values[d];
              }
              // Simulate shifting instructions.
              if (stride_val == 1) {
                // Shift each bank left by one, pulling new elements from the
                // right banks.
                for (int depth_index = 0; depth_index < 4; ++depth_index) {
                  for (int z = 0; z < 3; ++z) {
                    left_bank_0[depth_index][z] =
                        left_bank_0[depth_index][z + 1];
                    left_bank_1[depth_index][z] =
                        left_bank_1[depth_index][z + 1];
                    left_bank_2[depth_index][z] =
                        left_bank_2[depth_index][z + 1];
                  }
                  left_bank_0[depth_index][3] = right_bank_0[depth_index][0];
                  left_bank_1[depth_index][3] = right_bank_1[depth_index][0];
                  left_bank_2[depth_index][3] = right_bank_2[depth_index][0];
                  for (int z = 0; z < 3; ++z) {
                    right_bank_0[depth_index][z] =
                        right_bank_0[depth_index][z + 1];
                    right_bank_1[depth_index][z] =
                        right_bank_1[depth_index][z + 1];
                    right_bank_2[depth_index][z] =
                        right_bank_2[depth_index][z + 1];
                  }
                }
              } else {
                // Stride 2: shift each bank left by two.
                for (int depth_index = 0; depth_index < 4; ++depth_index) {
                  for (int z = 0; z < 2; ++z) {
                    left_bank_0[depth_index][z] =
                        left_bank_0[depth_index][z + 2];
                    left_bank_1[depth_index][z] =
                        left_bank_1[depth_index][z + 2];
                    left_bank_2[depth_index][z] =
                        left_bank_2[depth_index][z + 2];
                  }
                  left_bank_0[depth_index][2] = right_bank_0[depth_index][0];
                  left_bank_1[depth_index][2] = right_bank_1[depth_index][0];
                  left_bank_2[depth_index][2] = right_bank_2[depth_index][0];
                  left_bank_0[depth_index][3] = right_bank_0[depth_index][1];
                  left_bank_1[depth_index][3] = right_bank_1[depth_index][1];
                  left_bank_2[depth_index][3] = right_bank_2[depth_index][1];
                  for (int z = 0; z < 2; ++z) {
                    right_bank_0[depth_index][z] =
                        right_bank_0[depth_index][z + 2];
                    right_bank_1[depth_index][z] =
                        right_bank_1[depth_index][z + 2];
                    right_bank_2[depth_index][z] =
                        right_bank_2[depth_index][z + 2];
                  }
                }
              }
            }
          }
        }
        // Advance to the biases of the second sub-block.
        bias_data += bias_increment;
        // Move filter for second sub-block into operational filter.
        for (int z = 0; z < 4; ++z) {
          for (int x = 0; x < 4; ++x) {
            filter_bank_a_0[z][x] = filter_bank_b_0[z][x];
            filter_bank_a_1[z][x] = filter_bank_b_1[z][x];
            filter_bank_a_2[z][x] = filter_bank_b_2[z][x];
          }
        }
      }
    }
  }
};
// Intermediate ("unwound") C++ emulation of the NEON 3x3 dot-product kernel
// with unit input depth (depth multiplication).  The flat 8-byte input banks
// stand in for NEON registers; the element shifts below mimic register-shift
// instructions.  Only compiled when kUseUnwound3x3DotProduct is selected.
template <int32 stride, QuantizationType quantization_type>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseUnwound3x3DotProduct, quantization_type,
    DepthwiseConvDepthMultiplication::kUnitInputDepth, stride> {
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int output_depth = function_params->output_depth;
    const int stride_val = function_params->stride;
    const int four_over_stride = function_params->four_over_stride;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    const int bias_increment = function_params->bias_increment;
    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_multiplier = function_params->output_multiplier;
    const int32 output_shift = function_params->output_shift;
    const int32 output_offset = function_params->output_offset;
    TFLITE_DCHECK(depth_micro_repeats > 0);
    TFLITE_DCHECK_EQ(bias_increment, 4);
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    // Simulate NEON-register transposition of subset of filter.
    // "a" banks serve sub-block 0, "b" banks sub-block 1.
    int8 filter_bank_a_0[4][4];  // Depth 4, width 4.
    int8 filter_bank_a_1[4][4];
    int8 filter_bank_a_2[4][4];
    int8 filter_bank_b_0[4][4];
    int8 filter_bank_b_1[4][4];
    int8 filter_bank_b_2[4][4];
    // Simulate NEON-register input data concatenation + sub-selection.
    // Also sub-block, height 3, depth 4, width 4.
    // With unit input depth each bank is a flat run of 8 input bytes (one
    // per input row of the 3-row window).
    int8 input_bank_0[8];
    int8 input_bank_1[8];
    int8 input_bank_2[8];
    TFLITE_DCHECK_GE(depth_micro_repeats, 1);
    uint8 output_values[2][4];  // Sub-block, depth 4.
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Load the 3 (height) x 2 (sub-block) shuffled filter slices;
      // filter_workspace is advanced at the bottom of this loop.
      memcpy(filter_bank_a_0, filter_workspace, 16)
      memcpy(filter_bank_b_0, filter_workspace + 16, 16);
      memcpy(filter_bank_a_1, filter_workspace + 32, 16);
      memcpy(filter_bank_b_1, filter_workspace + 48, 16);
      memcpy(filter_bank_a_2, filter_workspace + 64, 16);
      memcpy(filter_bank_b_2, filter_workspace + 80, 16);
      // Work through one slice, by row, at a time.
      for (int k_height = 0; k_height < block_height; ++k_height) {
        const int8* scratch_data =
            scratch_block_data +
            workspace_height_stride * k_height * stride_val;
        uint8* output_data =
            output_block_data + output_height_stride * k_height + 8 * j_depth;
        // Prime the low half of each input bank with the first 4 bytes of
        // each of the three input rows.
        memcpy(input_bank_0, scratch_data, 4);
        memcpy(input_bank_1, scratch_data + workspace_height_stride, 4);
        memcpy(input_bank_2, scratch_data + 2 * workspace_height_stride, 4);
        for (int i_width = 0; i_width < output_width_overall_micro_repeats;
             ++i_width) {
          // The last micro block may produce fewer outputs than a full one.
          const int output_width = i_width == output_width_micro_repeats
                                       ? residual_width
                                       : four_over_stride;
          TFLITE_DCHECK_LE(output_width * stride_val, 4);
          const int8* input_data = scratch_data + 4 * i_width;
          // Refill the high half of each input bank with the next 4 bytes.
          memcpy(input_bank_0 + 4, input_data + 4, 4);
          memcpy(input_bank_1 + 4, input_data + workspace_height_stride + 4, 4);
          memcpy(input_bank_2 + 4, input_data + 2 * workspace_height_stride + 4,
                 4);
          // Iterate over input width shifts within 4x4 blocks.
          for (int w = 0; w < output_width; ++w) {
            constexpr int offset =
                0;  // Shift input instead of offset in multiply-accumulate.
            {
              // Sub-block 0: filter banks "a".
              const int s = 0;
              for (int d = 0; d < 4; ++d) {
                int32 acc = bias_data[s * 4 + d];
                for (int x = 0; x < 4; ++x) {
                  int32 input_val_0 = input_bank_0[offset + x];
                  int32 filter_val_0 = filter_bank_a_0[d][x];
                  acc += filter_val_0 * input_val_0;
                  int32 input_val_1 = input_bank_1[offset + x];
                  int32 filter_val_1 = filter_bank_a_1[d][x];
                  acc += filter_val_1 * input_val_1;
                  int32 input_val_2 = input_bank_2[offset + x];
                  int32 filter_val_2 = filter_bank_a_2[d][x];
                  acc += filter_val_2 * input_val_2;
                }
                // Requantize: rounding multiply/shift, offset, clamp.
                acc = reference_ops::depthwise_conv::DepthwiseConvRound<
                    DepthwiseConvOutputRounding::kUpward>(
                    acc, output_multiplier, output_shift);
                acc += output_offset;
                acc = std::max(acc, output_activation_min);
                acc = std::min(acc, output_activation_max);
                output_values[s][d] = static_cast<uint8>(acc);
                output_data[s * 4 + d] = output_values[s][d];
              }
            }
            {
              // Sub-block 1: filter banks "b".
              const int s = 1;
              for (int d = 0; d < 4; ++d) {
                int32 acc = bias_data[s * 4 + d];
                for (int x = 0; x < 4; ++x) {
                  int32 input_val_0 = input_bank_0[offset + x];
                  int32 filter_val_0 = filter_bank_b_0[d][x];
                  acc += filter_val_0 * input_val_0;
                  int32 input_val_1 = input_bank_1[offset + x];
                  int32 filter_val_1 = filter_bank_b_1[d][x];
                  acc += filter_val_1 * input_val_1;
                  int32 input_val_2 = input_bank_2[offset + x];
                  int32 filter_val_2 = filter_bank_b_2[d][x];
                  acc += filter_val_2 * input_val_2;
                }
                // Requantize: rounding multiply/shift, offset, clamp.
                acc = reference_ops::depthwise_conv::DepthwiseConvRound<
                    DepthwiseConvOutputRounding::kUpward>(
                    acc, output_multiplier, output_shift);
                acc += output_offset;
                acc = std::max(acc, output_activation_min);
                acc = std::min(acc, output_activation_max);
                output_values[s][d] = static_cast<uint8>(acc);
                output_data[s * 4 + d] = output_values[s][d];
              }
            }
            // Simulate register shifts.
            for (int i = 0; i < (8 - stride_val); ++i) {
              input_bank_0[i] = input_bank_0[i + stride_val];
              input_bank_1[i] = input_bank_1[i + stride_val];
              input_bank_2[i] = input_bank_2[i + stride_val];
            }
            output_data += output_depth;
          }
        }
      }
      // Advance biases and filters to the next depth-8 micro block.
      bias_data += 2 * bias_increment;
      filter_workspace += shuffled_filter_increment;
    }
  }
};
// The preceding section is only compiled when kUseUnwound3x3DotProduct versions
// of templated functions are selected.
//
// End of code section containing intermediate code transformation.
#ifdef USE_NEON
template <>
struct KernelMacroBlock<
DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kNoMultiplication,
/*stride=*/1> {
static inline uint8x8_t vqmovxn_s16(int16x8_t x) { return vqmovun_s16(x); }
static inline uint8x8_t util_vmin_x8(uint8x8_t a, uint8x8_t b) {
return vmin_u8(a, b);
}
static inline uint8x8_t util_vmax_x8(uint8x8_t a, uint8x8_t b) {
return vmax_u8(a, b);
}
static inline uint8x16_t util_vminq_x8(uint8x16_t a, uint8x16_t b) {
return vminq_u8(a, b);
}
static inline uint8x16_t util_vmaxq_x8(uint8x16_t a, uint8x16_t b) {
return vmaxq_u8(a, b);
}
static inline void KernelMacroBlockIntrinsics(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
static constexpr QuantizationType quantization_type =
QuantizationType::kNonPerChannelUint8;
const int workspace_height_stride =
function_params->workspace_height_stride;
const int input_width_overall_micro_repeats =
function_params->input_width_overall_micro_repeats;
const int output_width_micro_repeats =
function_params->output_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int depth = function_params->input_depth;
const int output_width_overall_micro_repeats =
function_params->output_width_overall_micro_repeats;
const int block_height = function_params->outbound_block_height;
const int residual_width = function_params->output_residual_width;
const int output_height_stride = function_params->output_height_stride;
constexpr int kBiasIncrement = 4;
TFLITE_DCHECK(depth_micro_repeats > 0);
const int width_micro_stride = 4 * 8;
const int depth_micro_stride =
width_micro_stride * input_width_overall_micro_repeats;
const int32 output_activation_min =
function_params->quantized_activation_min;
const int32 output_activation_max =
function_params->quantized_activation_max;
const int32 output_multiplier = function_params->output_multiplier;
const int32 output_shift = function_params->output_shift;
const int32 output_offset = function_params->output_offset;
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
TFLITE_DCHECK_GE(output_activation_min, 0);
TFLITE_DCHECK_LT(output_activation_min, 256);
TFLITE_DCHECK_GE(output_activation_max, 0);
TFLITE_DCHECK_LT(output_activation_max, 256);
} else {
TFLITE_DCHECK_GE(output_activation_min, -128);
TFLITE_DCHECK_LT(output_activation_min, 128);
TFLITE_DCHECK_GE(output_activation_max, -128);
TFLITE_DCHECK_LT(output_activation_max, 128);
}
TFLITE_DCHECK_GE(output_offset, -32878);
TFLITE_DCHECK_LT(output_offset, 32768);
const int16x8_t output_offset_vec =
vdupq_n_s16(static_cast<int16>(output_offset));
const uint8x16_t output_activation_min_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_min));
const uint8x16_t output_activation_max_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_max));
const int8* input_data_depthwise = scratch_block_data;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_depthwise = output_block_data;
for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
// Simulate NEON-register transposition of subset of filter.
int8x16_t filter_reg_0_a;
int8x16_t filter_reg_0_b;
int8x16_t filter_reg_1_a;
int8x16_t filter_reg_1_b;
int8x16_t filter_reg_2_a;
int8x16_t filter_reg_2_b;
int8x16_t filter_reg_0_a_shifted;
int8x16_t filter_reg_1_a_shifted;
int8x16_t filter_reg_2_a_shifted;
filter_reg_0_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
if (block_height == 4) {
for (int s = 0; s < 2; ++s) {
// Work through one slice, by row, at a time.
const int8* input_data_base = input_data_depthwise + 2 * 8 * s;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise + 4 * s;
const int8* next_input_data = input_data_base;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
// Load first sub-micro block of data into operational banks.
int8x16_t left_bank_0_reg = vld1q_s8(next_input_data);
int8x16_t left_bank_1_reg =
vld1q_s8(next_input_data + workspace_height_stride);
int8x16_t left_bank_2_reg =
vld1q_s8(next_input_data + 2 * workspace_height_stride);
int8x16_t left_bank_3_reg =
vld1q_s8(next_input_data + 3 * workspace_height_stride);
int8x16_t left_bank_4_reg =
vld1q_s8(next_input_data + 4 * workspace_height_stride);
int8x16_t left_bank_5_reg =
vld1q_s8(next_input_data + 5 * workspace_height_stride);
int32x4_t acc0;
int32x4_t acc1;
int32x4_t acc2;
int32x4_t acc3;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
for (int i_width = 0; i_width < output_width_micro_repeats;
++i_width) {
next_input_data += width_micro_stride;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += depth;
}
// Load next sub-micro block of data.
int8x16_t right_bank_0_reg;
int8x16_t right_bank_1_reg;
int8x16_t right_bank_2_reg;
int8x16_t right_bank_3_reg;
int8x16_t right_bank_4_reg;
int8x16_t right_bank_5_reg;
// Loading of next block always valid.
right_bank_0_reg = vld1q_s8(next_input_data);
right_bank_1_reg =
vld1q_s8(next_input_data + workspace_height_stride);
right_bank_2_reg =
vld1q_s8(next_input_data + 2 * workspace_height_stride);
right_bank_3_reg =
vld1q_s8(next_input_data + 3 * workspace_height_stride);
right_bank_4_reg =
vld1q_s8(next_input_data + 4 * workspace_height_stride);
right_bank_5_reg =
vld1q_s8(next_input_data + 5 * workspace_height_stride);
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_0_a_shifted, left_bank_0_reg);
acc0 = vdotq_s32(acc0, filter_reg_1_a_shifted, left_bank_1_reg);
acc0 = vdotq_s32(acc0, filter_reg_2_a_shifted, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_0_a_shifted, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a_shifted, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_2_a_shifted, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a_shifted, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_1_a_shifted, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_2_a_shifted, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a_shifted, left_bank_3_reg);
acc3 = vdotq_s32(acc3, filter_reg_1_a_shifted, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_2_a_shifted, left_bank_5_reg);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
left_bank_5_reg = vrev32q_u16(left_bank_5_reg);
vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);
vtrn1_s8x2_in_place(&left_bank_5_reg, &right_bank_5_reg);
output_data += depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_0_a_shifted, left_bank_0_reg);
acc0 = vdotq_s32(acc0, filter_reg_1_a_shifted, left_bank_1_reg);
acc0 = vdotq_s32(acc0, filter_reg_2_a_shifted, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_0_a_shifted, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a_shifted, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_2_a_shifted, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a_shifted, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_1_a_shifted, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_2_a_shifted, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a_shifted, left_bank_3_reg);
acc3 = vdotq_s32(acc3, filter_reg_1_a_shifted, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_2_a_shifted, left_bank_5_reg);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
left_bank_0_reg = right_bank_0_reg;
left_bank_1_reg = right_bank_1_reg;
left_bank_2_reg = right_bank_2_reg;
left_bank_3_reg = right_bank_3_reg;
left_bank_4_reg = right_bank_4_reg;
left_bank_5_reg = right_bank_5_reg;
output_data += depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
}
}
if (residual_width > 0) {
next_input_data += width_micro_stride;
const int output_width = residual_width;
// Load next sub-micro block of data.
int8x16_t right_bank_0_reg;
int8x16_t right_bank_1_reg;
int8x16_t right_bank_2_reg;
int8x16_t right_bank_3_reg;
int8x16_t right_bank_4_reg;
int8x16_t right_bank_5_reg;
// Logic: (output_width - 1) * stride_val < 2.
const bool no_right_block = output_width < 3;
if (no_right_block) {
// Only needed for sanitizer checks.
right_bank_0_reg = vdupq_n_s8(0);
right_bank_1_reg = vdupq_n_s8(0);
right_bank_2_reg = vdupq_n_s8(0);
right_bank_3_reg = vdupq_n_s8(0);
right_bank_4_reg = vdupq_n_s8(0);
right_bank_5_reg = vdupq_n_s8(0);
} else {
right_bank_0_reg = vld1q_s8(next_input_data);
right_bank_1_reg =
vld1q_s8(next_input_data + workspace_height_stride);
right_bank_2_reg =
vld1q_s8(next_input_data + 2 * workspace_height_stride);
right_bank_3_reg =
vld1q_s8(next_input_data + 3 * workspace_height_stride);
right_bank_4_reg =
vld1q_s8(next_input_data + 4 * workspace_height_stride);
right_bank_5_reg =
vld1q_s8(next_input_data + 5 * workspace_height_stride);
}
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
biregister_rotate_8(&left_bank_0_reg, &right_bank_0_reg);
biregister_rotate_8(&left_bank_1_reg, &right_bank_1_reg);
biregister_rotate_8(&left_bank_2_reg, &right_bank_2_reg);
biregister_rotate_8(&left_bank_3_reg, &right_bank_3_reg);
biregister_rotate_8(&left_bank_4_reg, &right_bank_4_reg);
biregister_rotate_8(&left_bank_5_reg, &right_bank_5_reg);
output_data += depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
}
}
input_data_base += 4 * workspace_height_stride;
output_data_base += 4 * output_height_stride;
// Move to next sub-block: advance to second set of filters, to new
// bias.
filter_reg_0_a = filter_reg_0_b;
filter_reg_1_a = filter_reg_1_b;
filter_reg_2_a = filter_reg_2_b;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
}
} else {
const int8* input_data_base = input_data_depthwise;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise;
const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
for (int k_height = 0; k_height < block_height; ++k_height) {
const int8* next_input_data = input_data_base;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
// Load first sub-micro block of data into operational banks.
int8x16_t left_bank_0_reg_a = vld1q_s8(next_input_data);
int8x16_t left_bank_1_reg_a =
vld1q_s8(next_input_data + workspace_height_stride);
int8x16_t left_bank_2_reg_a =
vld1q_s8(next_input_data + 2 * workspace_height_stride);
int8x16_t left_bank_0_reg_b = vld1q_s8(next_input_data + 16);
int8x16_t left_bank_1_reg_b =
vld1q_s8(next_input_data + workspace_height_stride + 16);
int8x16_t left_bank_2_reg_b =
vld1q_s8(next_input_data + 2 * workspace_height_stride + 16);
for (int i_width = 0; i_width < output_width_overall_micro_repeats;
++i_width) {
next_input_data += width_micro_stride;
const int output_width =
i_width == output_width_micro_repeats ? residual_width : 4;
int8x16_t right_bank_0_reg_a;
int8x16_t right_bank_1_reg_a;
int8x16_t right_bank_2_reg_a;
int8x16_t right_bank_0_reg_b;
int8x16_t right_bank_1_reg_b;
int8x16_t right_bank_2_reg_b;
// Logic: (output_width - 1) * stride_val < 2.
const bool no_right_block = output_width < 3;
// Load next sub-micro block of data.
if (no_right_block) {
// Only needed for sanitizer checks.
right_bank_0_reg_a = vdupq_n_s8(0);
right_bank_1_reg_a = vdupq_n_s8(0);
right_bank_2_reg_a = vdupq_n_s8(0);
right_bank_0_reg_b = vdupq_n_s8(0);
right_bank_1_reg_b = vdupq_n_s8(0);
right_bank_2_reg_b = vdupq_n_s8(0);
} else {
right_bank_0_reg_a = vld1q_s8(next_input_data);
right_bank_1_reg_a =
vld1q_s8(next_input_data + workspace_height_stride);
right_bank_2_reg_a =
vld1q_s8(next_input_data + 2 * workspace_height_stride);
right_bank_0_reg_b = vld1q_s8(next_input_data + 16);
right_bank_1_reg_b =
vld1q_s8(next_input_data + workspace_height_stride + 16);
right_bank_2_reg_b =
vld1q_s8(next_input_data + 2 * workspace_height_stride + 16);
}
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
int32x4_t acc_a = adjusted_bias_data_a;
int32x4_t acc_b = adjusted_bias_data_b;
acc_a = vdotq_s32(acc_a, filter_reg_0_a, left_bank_0_reg_a);
acc_a = vdotq_s32(acc_a, filter_reg_1_a, left_bank_1_reg_a);
acc_a = vdotq_s32(acc_a, filter_reg_2_a, left_bank_2_reg_a);
acc_b = vdotq_s32(acc_b, filter_reg_0_b, left_bank_0_reg_b);
acc_b = vdotq_s32(acc_b, filter_reg_1_b, left_bank_1_reg_b);
acc_b = vdotq_s32(acc_b, filter_reg_2_b, left_bank_2_reg_b);
// Fixed-point multiplication.
acc_a = vqrdmulhq_n_s32(acc_a, output_multiplier);
acc_b = vqrdmulhq_n_s32(acc_b, output_multiplier);
acc_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc_a, -output_shift);
acc_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc_b, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_0 =
vcombine_s16(vqmovn_s32(acc_a), vqmovn_s32(acc_b));
acc_s16_0_0 = vqaddq_s16(acc_s16_0_0, output_offset_vec);
// Apply the activation function.
uint8x8_t acc_u8_0_0 = vqmovxn_s16(acc_s16_0_0);
acc_u8_0_0 = util_vmax_x8(acc_u8_0_0,
vget_low_u8(output_activation_min_vec));
acc_u8_0_0 = util_vmin_x8(acc_u8_0_0,
vget_low_u8(output_activation_max_vec));
util_vst1_x8(output_data, acc_u8_0_0);
biregister_rotate_8(&left_bank_0_reg_a, &right_bank_0_reg_a);
biregister_rotate_8(&left_bank_1_reg_a, &right_bank_1_reg_a);
biregister_rotate_8(&left_bank_2_reg_a, &right_bank_2_reg_a);
biregister_rotate_8(&left_bank_0_reg_b, &right_bank_0_reg_b);
biregister_rotate_8(&left_bank_1_reg_b, &right_bank_1_reg_b);
biregister_rotate_8(&left_bank_2_reg_b, &right_bank_2_reg_b);
output_data += depth;
}
}
input_data_base += workspace_height_stride;
output_data_base += output_height_stride;
}
}
input_data_depthwise += depth_micro_stride;
output_data_depthwise += 8;
}
} // NOLINT(readability/fn_size) Manually unrolled.
  // Entry point for this kernel macro-block specialization. Thin forwarder:
  // all of the work happens in KernelMacroBlockIntrinsics, defined earlier in
  // this struct.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
// Kernel macro-block specialization: 3x3 depthwise convolution via NEON
// dot-product intrinsics, non-per-channel uint8 quantization, no depth
// multiplication, stride 2. Consumes pre-shuffled input from
// scratch_block_data and pre-transposed filters from filter_workspace,
// writing quantized uint8 results to output_block_data.
template <>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
    QuantizationType::kNonPerChannelUint8,
    DepthwiseConvDepthMultiplication::kNoMultiplication,
    /*stride=*/2> {
  // Saturating narrow of int16 lanes to unsigned 8-bit lanes (uint8 output).
  static inline uint8x8_t vqmovxn_s16(int16x8_t x) { return vqmovun_s16(x); }
  // 8-lane unsigned byte min/max, used below to clamp results to the
  // quantized activation range.
  static inline uint8x8_t util_vmin_x8(uint8x8_t a, uint8x8_t b) {
    return vmin_u8(a, b);
  }
  static inline uint8x8_t util_vmax_x8(uint8x8_t a, uint8x8_t b) {
    return vmax_u8(a, b);
  }
  // Computes one macro block of stride-2 depthwise conv output.
  //
  // scratch_block_data:  shuffled input workspace (int8), rows separated by
  //                      workspace_height_stride, micro blocks of 4x8 bytes.
  // filter_workspace:    shuffled/transposed filter data, 96 bytes per depth
  //                      micro block (see shuffled_filter_increment).
  // bias_data:           per-channel bias, advanced 4 entries at a time.
  // output_block_data:   destination, uint8, rows separated by
  //                      output_height_stride.
  // function_params:     geometry and quantization parameters.
  static inline void KernelMacroBlockIntrinsics(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, uint8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    static constexpr QuantizationType quantization_type =
        QuantizationType::kNonPerChannelUint8;
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int input_width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->input_depth;
    // This specialization is compiled for stride 2 only; 4 input positions
    // per micro block therefore yield 2 outputs (kFourOverStride).
    constexpr int kStrideVal = 2;
    constexpr int kFourOverStride = 2;
    TFLITE_DCHECK_EQ(function_params->stride, kStrideVal);
    TFLITE_DCHECK_EQ(function_params->four_over_stride, kFourOverStride);
    const int workspace_width_micro_repeats =
        function_params->workspace_width_micro_repeats;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    constexpr int kBiasIncrement = 4;
    TFLITE_DCHECK(depth_micro_repeats > 0);
    // Each width micro block in the workspace is 4 positions x 8 depth bytes.
    const int width_micro_stride = 4 * 8;
    const int depth_micro_stride =
        width_micro_stride * input_width_overall_micro_repeats;
    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_multiplier = function_params->output_multiplier;
    const int32 output_shift = function_params->output_shift;
    const int32 output_offset = function_params->output_offset;
    // Activation bounds must fit the external type: uint8 here, int8 in the
    // per-channel variants that share this code shape.
    if (quantization_type == QuantizationType::kNonPerChannelUint8) {
      TFLITE_DCHECK_GE(output_activation_min, 0);
      TFLITE_DCHECK_LT(output_activation_min, 256);
      TFLITE_DCHECK_GE(output_activation_max, 0);
      TFLITE_DCHECK_LT(output_activation_max, 256);
    } else {
      TFLITE_DCHECK_GE(output_activation_min, -128);
      TFLITE_DCHECK_LT(output_activation_min, 128);
      TFLITE_DCHECK_GE(output_activation_max, -128);
      TFLITE_DCHECK_LT(output_activation_max, 128);
    }
    // NOTE(review): -32878 looks like a typo for -32768 (int16 lower bound);
    // confirm against upstream before changing.
    TFLITE_DCHECK_GE(output_offset, -32878);
    TFLITE_DCHECK_LT(output_offset, 32768);
    // This version only does min/max on 64 bits.
    const int16x8_t output_offset_vec =
        vdupq_n_s16(static_cast<int16>(output_offset));
    const uint8x8_t output_activation_min_vec =
        vdup_n_u8(static_cast<uint8>(output_activation_min));
    const uint8x8_t output_activation_max_vec =
        vdup_n_u8(static_cast<uint8>(output_activation_max));
    // 96 bytes of filter per depth micro block: 2 sub-blocks x 3 rows x 16
    // bytes, matching the vld1q_s8 offsets {0,16,32,48,64,80} used below.
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;
    TFLITE_DCHECK_LE(block_height, 2);
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      const int8* filter_block =
          filter_workspace + shuffled_filter_increment * j_depth;
      // Two manually unrolled paths: a 2-row output block, else a single row
      // handling both 4-deep sub-blocks at once.
      if (block_height == 2) {
        for (int s = 0; s < 2; ++s) {
          // Simulate NEON-register transposition of subset of filter.
          int8x16_t filter_reg_0_a;
          int8x16_t filter_reg_1_a;
          int8x16_t filter_reg_2_a;
          filter_reg_0_a = vld1q_s8(filter_block + s * 16);
          filter_reg_1_a = vld1q_s8(filter_block + s * 16 + 32);
          filter_reg_2_a = vld1q_s8(filter_block + s * 16 + 64);
          const int8* scratch_data =
              scratch_block_data + depth_micro_stride * j_depth;
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data = output_block_data + 8 * j_depth;
          const int8* input_data_0 = scratch_data + s * 2 * 8;
          const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);
          // Load first sub-micro block of data into operational banks.
          int8x16_t left_bank_0_reg = vld1q_s8(input_data_0);
          int8x16_t left_bank_1_reg =
              vld1q_s8(input_data_0 + workspace_height_stride);
          int8x16_t left_bank_2_reg =
              vld1q_s8(input_data_0 + 2 * workspace_height_stride);
          int8x16_t left_bank_3_reg =
              vld1q_s8(input_data_0 + 3 * workspace_height_stride);
          int8x16_t left_bank_4_reg =
              vld1q_s8(input_data_0 + 4 * workspace_height_stride);
          int8x16_t right_bank_0_reg;
          int8x16_t right_bank_1_reg;
          int8x16_t right_bank_2_reg;
          int8x16_t right_bank_3_reg;
          int8x16_t right_bank_4_reg;
          int32x4_t acc0;
          int32x4_t acc1;
          int16x8_t acc_s16_0_1;
          uint8x8_t acc_u8;
          int i_width = 0;
          // When output_width_micro_repeats <
          // output_width_overall_micro_repeats, 0 < residual_width <= 2, and so
          // residual_width == 1 is then true iff residual_width < 2.
          const int adjusted_width_micro_repeats =
              (output_width_micro_repeats <
               output_width_overall_micro_repeats) &&
              (residual_width == 1)
                  ? output_width_micro_repeats
                  : output_width_overall_micro_repeats;
          // Full-width micro blocks: produce 2 outputs per block for each of
          // the two output rows, loads interleaved with dot products.
          for (; i_width < adjusted_width_micro_repeats; ++i_width) {
            const int output_width = kFourOverStride;
            TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
            const int8* input_data =
                input_data_0 + width_micro_stride * i_width;
            acc0 = adjusted_bias_data;
            acc1 = adjusted_bias_data;
            right_bank_0_reg = vld1q_s8(input_data + width_micro_stride);
            right_bank_1_reg = vld1q_s8(input_data + width_micro_stride +
                                        workspace_height_stride);
            acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
            acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
            typename QuantizationTypeImpl<quantization_type>::ExternalType*
                output_data_base = output_data + depth * 2 * i_width + 4 * s;
            right_bank_2_reg = vld1q_s8(input_data + width_micro_stride +
                                        2 * workspace_height_stride);
            right_bank_3_reg = vld1q_s8(input_data + width_micro_stride +
                                        3 * workspace_height_stride);
            acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
            acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
            acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
            acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);
            right_bank_4_reg = vld1q_s8(input_data + width_micro_stride +
                                        4 * workspace_height_stride);
            // Fixed-point multiplication.
            acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0, -output_shift);
            acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc1, -output_shift);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);
            // Shift banks for the second (odd) stride-2 output position.
            // NOTE(review): vrev32q_u16 on int8x16_t appears to rely on
            // implicit (lax) vector conversions; it swaps 16-bit pairs within
            // each 32-bit lane prior to the vtrn1 interleave below — confirm
            // build flags permit this.
            left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
            left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
            left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
            left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
            left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
            acc0 = adjusted_bias_data;
            acc1 = adjusted_bias_data;
            vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
            vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
            vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
            vst1_lane_8x4(output_data_base, acc_u8, 0);
            vst1_lane_8x4(output_data_base + output_height_stride, acc_u8, 1);
            vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
            vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);
            acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
            acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
            acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
            acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
            acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
            acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);
            // Fixed-point multiplication.
            acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0, -output_shift);
            acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc1, -output_shift);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);
            vst1_lane_8x4(output_data_base + depth, acc_u8, 0);
            vst1_lane_8x4(output_data_base + depth + output_height_stride,
                          acc_u8, 1);
            left_bank_0_reg = right_bank_0_reg;
            left_bank_1_reg = right_bank_1_reg;
            left_bank_2_reg = right_bank_2_reg;
            left_bank_3_reg = right_bank_3_reg;
            left_bank_4_reg = right_bank_4_reg;
          }
          // Residual micro block(s): a single output column remains.
          for (; i_width < output_width_overall_micro_repeats; ++i_width) {
            TFLITE_DCHECK_NE(residual_width, kFourOverStride);
            // No need to load next ("right") block of data.
            typename QuantizationTypeImpl<quantization_type>::ExternalType*
                output_data_base = output_data + depth * 2 * i_width + 4 * s;
            // Iterate over input width shifts within 4x4 blocks.
            {
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
              acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);
              // Fixed-point multiplication.
              acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                  acc0, -output_shift);
              acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                  acc1, -output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              // Apply the activation function.
              uint8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
              acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
              acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);
              vst1_lane_8x4(output_data_base, acc_u8, 0);
              vst1_lane_8x4(output_data_base + output_height_stride, acc_u8, 1);
              left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
              left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
              left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
              left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
              left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
              vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
              vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
              vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
              vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
              vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);
            }
          }
          bias_data += kBiasIncrement;
        }
      } else {
        // block_height == 1.
        int8x16_t filter_reg_0_a;
        int8x16_t filter_reg_1_a;
        int8x16_t filter_reg_2_a;
        int8x16_t filter_reg_0_b;
        int8x16_t filter_reg_1_b;
        int8x16_t filter_reg_2_b;
        filter_reg_0_a = vld1q_s8(filter_block);
        filter_reg_1_a = vld1q_s8(filter_block + 32);
        filter_reg_2_a = vld1q_s8(filter_block + 64);
        filter_reg_0_b = vld1q_s8(filter_block + 16);
        filter_reg_1_b = vld1q_s8(filter_block + 16 + 32);
        filter_reg_2_b = vld1q_s8(filter_block + 16 + 64);
        const int8* scratch_data =
            scratch_block_data + depth_micro_stride * j_depth;
        typename QuantizationTypeImpl<quantization_type>::ExternalType*
            output_data = output_block_data + 8 * j_depth;
        const int8* input_data_0 = scratch_data;
        const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;
        const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;
        // Load first sub-micro block of data into operational banks.
        int8x16_t left_bank_0_reg_a = vld1q_s8(input_data_0);
        int8x16_t left_bank_1_reg_a =
            vld1q_s8(input_data_0 + workspace_height_stride);
        int8x16_t left_bank_2_reg_a =
            vld1q_s8(input_data_0 + 2 * workspace_height_stride);
        int8x16_t left_bank_0_reg_b = vld1q_s8(input_data_0 + 16);
        int8x16_t left_bank_1_reg_b =
            vld1q_s8(input_data_0 + workspace_height_stride + 16);
        int8x16_t left_bank_2_reg_b =
            vld1q_s8(input_data_0 + 2 * workspace_height_stride + 16);
        int8x16_t right_bank_0_reg_a;
        int8x16_t right_bank_1_reg_a;
        int8x16_t right_bank_2_reg_a;
        int8x16_t right_bank_0_reg_b;
        int8x16_t right_bank_1_reg_b;
        int8x16_t right_bank_2_reg_b;
        int32x4_t acc0_a;
        int32x4_t acc0_b;
        for (int i_width = 0; i_width < output_width_overall_micro_repeats;
             ++i_width) {
          const int output_width = i_width == output_width_micro_repeats
                                       ? residual_width
                                       : kFourOverStride;
          TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
          const int8* input_data = input_data_0 + width_micro_stride * i_width;
          const bool no_right_block = i_width == output_width_micro_repeats &&
                                      output_width_overall_micro_repeats ==
                                          workspace_width_micro_repeats;
          if (!no_right_block) {
            // Load next sub-micro block of data.
            right_bank_0_reg_a = vld1q_s8(input_data + width_micro_stride);
            right_bank_1_reg_a = vld1q_s8(input_data + width_micro_stride +
                                          workspace_height_stride);
            right_bank_2_reg_a = vld1q_s8(input_data + width_micro_stride +
                                          2 * workspace_height_stride);
            right_bank_0_reg_b = vld1q_s8(input_data + width_micro_stride + 16);
            right_bank_1_reg_b = vld1q_s8(input_data + width_micro_stride +
                                          workspace_height_stride + 16);
            right_bank_2_reg_b = vld1q_s8(input_data + width_micro_stride +
                                          2 * workspace_height_stride + 16);
          }
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data_base = output_data + depth * 2 * i_width;
          // Iterate over input width shifts within 4x4 blocks.
          {
            // First (even) stride-2 output position: both 4-deep sub-blocks
            // (suffix _a and _b) computed together into one 8-wide store.
            acc0_a = adjusted_bias_data_a;
            acc0_b = adjusted_bias_data_b;
            acc0_a = vdotq_s32(acc0_a, filter_reg_0_a, left_bank_0_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_1_a, left_bank_1_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_2_a, left_bank_2_reg_a);
            acc0_b = vdotq_s32(acc0_b, filter_reg_0_b, left_bank_0_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_1_b, left_bank_1_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_2_b, left_bank_2_reg_b);
            // Fixed-point multiplication.
            acc0_a = vqrdmulhq_n_s32(acc0_a, output_multiplier);
            acc0_b = vqrdmulhq_n_s32(acc0_b, output_multiplier);
            acc0_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0_a, -output_shift);
            acc0_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0_b, -output_shift);
            // Add the output offset.
            int16x8_t acc_s16_0_1 =
                vcombine_s16(vqmovn_s32(acc0_a), vqmovn_s32(acc0_b));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            uint8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);
            util_vst1_x8(output_data_base, acc_u8);
            left_bank_0_reg_a = vrev32q_u16(left_bank_0_reg_a);
            left_bank_1_reg_a = vrev32q_u16(left_bank_1_reg_a);
            left_bank_2_reg_a = vrev32q_u16(left_bank_2_reg_a);
            left_bank_0_reg_b = vrev32q_u16(left_bank_0_reg_b);
            left_bank_1_reg_b = vrev32q_u16(left_bank_1_reg_b);
            left_bank_2_reg_b = vrev32q_u16(left_bank_2_reg_b);
            vtrn1_s8x2_in_place(&left_bank_0_reg_a, &right_bank_0_reg_a);
            vtrn1_s8x2_in_place(&left_bank_1_reg_a, &right_bank_1_reg_a);
            vtrn1_s8x2_in_place(&left_bank_2_reg_a, &right_bank_2_reg_a);
            vtrn1_s8x2_in_place(&left_bank_0_reg_b, &right_bank_0_reg_b);
            vtrn1_s8x2_in_place(&left_bank_1_reg_b, &right_bank_1_reg_b);
            vtrn1_s8x2_in_place(&left_bank_2_reg_b, &right_bank_2_reg_b);
          }
          // Second (odd) output position, only when this micro block yields
          // two outputs.
          if (output_width > 1) {
            acc0_a = adjusted_bias_data_a;
            acc0_b = adjusted_bias_data_b;
            acc0_a = vdotq_s32(acc0_a, filter_reg_0_a, left_bank_0_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_1_a, left_bank_1_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_2_a, left_bank_2_reg_a);
            acc0_b = vdotq_s32(acc0_b, filter_reg_0_b, left_bank_0_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_1_b, left_bank_1_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_2_b, left_bank_2_reg_b);
            // Fixed-point multiplication.
            acc0_a = vqrdmulhq_n_s32(acc0_a, output_multiplier);
            acc0_b = vqrdmulhq_n_s32(acc0_b, output_multiplier);
            acc0_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0_a, -output_shift);
            acc0_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
                acc0_b, -output_shift);
            // Add the output offset.
            int16x8_t acc_s16_0_1 =
                vcombine_s16(vqmovn_s32(acc0_a), vqmovn_s32(acc0_b));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            uint8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);
            util_vst1_x8(output_data_base + depth, acc_u8);
            left_bank_0_reg_a = right_bank_0_reg_a;
            left_bank_1_reg_a = right_bank_1_reg_a;
            left_bank_2_reg_a = right_bank_2_reg_a;
            left_bank_0_reg_b = right_bank_0_reg_b;
            left_bank_1_reg_b = right_bank_1_reg_b;
            left_bank_2_reg_b = right_bank_2_reg_b;
          }
        }
      }
    }
  }  // NOLINT(readability/fn_size) Manually unrolled.
  // Entry point for this macro-block specialization; forwards to the
  // intrinsics implementation.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<
DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/1> {
  // Saturating narrow: int16 lanes -> unsigned 8-bit lanes (uint8 output path).
  static inline uint8x8_t vqmovxn_s16(int16x8_t x) { return vqmovun_s16(x); }
  // 8-lane unsigned byte minimum, used to clamp against the activation max.
  static inline uint8x8_t util_vmin_x8(uint8x8_t a, uint8x8_t b) {
    return vmin_u8(a, b);
  }
  // 8-lane unsigned byte maximum, used to clamp against the activation min.
  static inline uint8x8_t util_vmax_x8(uint8x8_t a, uint8x8_t b) {
    return vmax_u8(a, b);
  }
  // 16-lane (quad-register) unsigned byte minimum for the activation clamp.
  static inline uint8x16_t util_vminq_x8(uint8x16_t a, uint8x16_t b) {
    return vminq_u8(a, b);
  }
  // 16-lane (quad-register) unsigned byte maximum for the activation clamp.
  static inline uint8x16_t util_vmaxq_x8(uint8x16_t a, uint8x16_t b) {
    return vmaxq_u8(a, b);
  }
static inline void KernelMacroBlockIntrinsics(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
static constexpr QuantizationType quantization_type =
QuantizationType::kNonPerChannelUint8;
TFLITE_DCHECK_EQ(function_params->stride, 1);
const int workspace_height_stride =
function_params->workspace_height_stride;
const int output_width_micro_repeats =
function_params->output_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int output_depth = function_params->output_depth;
const int output_width_overall_micro_repeats =
function_params->output_width_overall_micro_repeats;
const int block_height = function_params->outbound_block_height;
const int residual_width = function_params->output_residual_width;
const int output_height_stride = function_params->output_height_stride;
constexpr int kBiasIncrement = 4;
TFLITE_DCHECK(depth_micro_repeats > 0);
const int32 output_activation_min =
function_params->quantized_activation_min;
const int32 output_activation_max =
function_params->quantized_activation_max;
const int32 output_multiplier = function_params->output_multiplier;
const int32 output_shift = function_params->output_shift;
const int32 output_offset = function_params->output_offset;
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
TFLITE_DCHECK_GE(output_activation_min, 0);
TFLITE_DCHECK_LT(output_activation_min, 256);
TFLITE_DCHECK_GE(output_activation_max, 0);
TFLITE_DCHECK_LT(output_activation_max, 256);
} else {
TFLITE_DCHECK_GE(output_activation_min, -128);
TFLITE_DCHECK_LT(output_activation_min, 128);
TFLITE_DCHECK_GE(output_activation_max, -128);
TFLITE_DCHECK_LT(output_activation_max, 128);
}
TFLITE_DCHECK_GE(output_offset, -32878);
TFLITE_DCHECK_LT(output_offset, 32768);
const int16x8_t output_offset_vec =
vdupq_n_s16(static_cast<int16>(output_offset));
const uint8x16_t output_activation_min_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_min));
const uint8x16_t output_activation_max_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_max));
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_depthwise = output_block_data;
for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
// Simulate NEON-register transposition of subset of filter.
int8x16_t filter_reg_0_a;
int8x16_t filter_reg_0_b;
int8x16_t filter_reg_1_a;
int8x16_t filter_reg_1_b;
int8x16_t filter_reg_2_a;
int8x16_t filter_reg_2_b;
int8x16_t filter_reg_0_a_shifted;
int8x16_t filter_reg_1_a_shifted;
int8x16_t filter_reg_2_a_shifted;
filter_reg_0_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
// When output_width_micro_repeats < output_width_overall_micro_repeats,
// 0 < residual_width <= 2, and so residual_width == 1 is then true iff
// residual_width < 2.
const int adjusted_width_micro_repeats =
(output_width_micro_repeats < output_width_overall_micro_repeats) &&
(residual_width < 4)
? output_width_micro_repeats
: output_width_overall_micro_repeats;
if (block_height == 4) {
for (int s = 0; s < 2; ++s) {
// Work through one slice, by row, at a time.
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise + 4 * s;
const int8* next_input_data = scratch_block_data;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
int8x16_t input_bank_a_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_b_reg; // left 2, right 2, left 3, right 3.
int8x16_t input_bank_c_reg; // left 4, right 4, left 5, right 5.
// Load first sub-micro block of data into operational banks.
input_bank_a_reg =
vld1q_dup_s8x4(next_input_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 2);
input_bank_b_reg = vld1q_dup_s8x4(
next_input_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 2);
input_bank_c_reg = vld1q_dup_s8x4(
next_input_data +
4 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 2);
int32x4_t acc0;
int32x4_t acc1;
int32x4_t acc2;
int32x4_t acc3;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg, 2);
int i_width = 0;
for (; i_width < adjusted_width_micro_repeats; ++i_width) {
next_input_data += 4;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += output_depth;
}
// Load next sub-micro block of data.
input_bank_a_reg =
vld1q_lane_8x4(next_input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_b_reg, 1);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 3);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 4 * workspace_height_stride,
input_bank_c_reg, 1);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 3);
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a_shifted,
input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a_shifted,
input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a_shifted,
input_bank_a_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a_shifted,
input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a_shifted,
input_bank_b_reg, 2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a_shifted,
input_bank_c_reg, 2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += output_depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a_shifted,
input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a_shifted,
input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a_shifted,
input_bank_a_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a_shifted,
input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a_shifted,
input_bank_b_reg, 2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a_shifted,
input_bank_c_reg, 2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
}
}
if (i_width < output_width_overall_micro_repeats) {
next_input_data += 4;
const int output_width = residual_width;
// Load next sub-micro block of data.
input_bank_a_reg =
vld1q_lane_8x4(next_input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_b_reg, 1);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 3);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 4 * workspace_height_stride,
input_bank_c_reg, 1);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 3);
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
acc2 = vqrdmulhq_n_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc2, -output_shift);
acc3 = vqrdmulhq_n_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc3, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
uint8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 8);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 8);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 8);
output_data += output_depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
}
}
// scratch_block_data += 4 * workspace_height_stride;
output_data_base += 4 * output_height_stride;
// Move to next sub-block: advance to second set of filters, to new
// bias.
filter_reg_0_a = filter_reg_0_b;
filter_reg_1_a = filter_reg_1_b;
filter_reg_2_a = filter_reg_2_b;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
}
} else {
// Block height < 4.
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise;
const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
for (int k_height = 0; k_height < block_height; ++k_height) {
const int8* next_input_data =
scratch_block_data + k_height * workspace_height_stride;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
int8x16_t input_bank_p_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_q_reg; // left 2, right 2, left 3, right 3.
// Load first sub-micro block of data into operational banks.
input_bank_p_reg =
vld1q_dup_s8x4(next_input_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_p_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_p_reg, 2);
input_bank_q_reg = vld1q_dup_s8x4(
next_input_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
for (int i_width = 0; i_width < output_width_overall_micro_repeats;
++i_width) {
next_input_data += 4;
const int output_width =
i_width == output_width_micro_repeats ? residual_width : 4;
// Load next sub-micro block of data.
input_bank_p_reg =
vld1q_lane_8x4(next_input_data, input_bank_p_reg, 1);
input_bank_p_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_p_reg, 3);
input_bank_q_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_q_reg, 1);
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
int32x4_t acc_a = adjusted_bias_data_a;
int32x4_t acc_b = adjusted_bias_data_b;
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_0_a,
input_bank_p_reg, 0);
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_1_a,
input_bank_p_reg, 2);
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_2_a,
input_bank_q_reg, 0);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_0_b,
input_bank_p_reg, 0);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_1_b,
input_bank_p_reg, 2);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_2_b,
input_bank_q_reg, 0);
// Fixed-point multiplication.
acc_a = vqrdmulhq_n_s32(acc_a, output_multiplier);
acc_b = vqrdmulhq_n_s32(acc_b, output_multiplier);
acc_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc_a, -output_shift);
acc_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc_b, -output_shift);
// Add the output offset.
int16x8_t acc_s16_0_0 =
vcombine_s16(vqmovn_s32(acc_a), vqmovn_s32(acc_b));
acc_s16_0_0 = vqaddq_s16(acc_s16_0_0, output_offset_vec);
// Apply the activation function.
uint8x8_t acc_u8_0_0 = vqmovxn_s16(acc_s16_0_0);
acc_u8_0_0 = util_vmax_x8(acc_u8_0_0,
vget_low_u8(output_activation_min_vec));
acc_u8_0_0 = util_vmin_x8(acc_u8_0_0,
vget_low_u8(output_activation_max_vec));
util_vst1_x8(output_data, acc_u8_0_0);
input_bank_p_reg = vshrq_n_u64(input_bank_p_reg, 8);
input_bank_q_reg = vshrq_n_u64(input_bank_q_reg, 8);
output_data += output_depth;
}
}
output_data_base += output_height_stride;
}
}
output_data_depthwise += 8;
}
} // NOLINT(readability/fn_size) Manually unrolled.
  // Entry point for this kernel specialization.
  //
  // Thin dispatch wrapper: forwards the pre-packed inputs straight to the
  // intrinsics implementation above.
  //
  // scratch_block_data: input macro block, already shuffled into the
  //     workspace layout expected by the kernel.
  // filter_workspace:   filters pre-permuted for dot-product consumption.
  // bias_data:          per-output-channel bias values.
  // output_block_data:  destination for the quantized uint8 output block.
  // function_params:    geometry and quantization parameters shared by the
  //     depthwise-conv dot-product kernels.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<
DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
QuantizationType::kNonPerChannelUint8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/2> {
static inline uint8x8_t vqmovxn_s16(int16x8_t x) { return vqmovun_s16(x); }
static inline uint8x8_t util_vmin_x8(uint8x8_t a, uint8x8_t b) {
return vmin_u8(a, b);
}
static inline uint8x8_t util_vmax_x8(uint8x8_t a, uint8x8_t b) {
return vmax_u8(a, b);
}
static inline void KernelMacroBlockIntrinsics(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, uint8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
static constexpr QuantizationType quantization_type =
QuantizationType::kNonPerChannelUint8;
const int workspace_height_stride =
function_params->workspace_height_stride;
const int output_width_micro_repeats =
function_params->output_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int output_depth = function_params->output_depth;
constexpr int kStrideVal = 2;
TFLITE_DCHECK_EQ(function_params->stride, kStrideVal);
const int output_width_overall_micro_repeats =
function_params->output_width_overall_micro_repeats;
const int block_height = function_params->outbound_block_height;
const int residual_width = function_params->output_residual_width;
const int output_height_stride = function_params->output_height_stride;
constexpr int kBiasIncrement = 4;
const int32 output_activation_min =
function_params->quantized_activation_min;
const int32 output_activation_max =
function_params->quantized_activation_max;
const int32 output_multiplier = function_params->output_multiplier;
const int32 output_shift = function_params->output_shift;
const int32 output_offset = function_params->output_offset;
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
TFLITE_DCHECK_GE(output_activation_min, 0);
TFLITE_DCHECK_LT(output_activation_min, 256);
TFLITE_DCHECK_GE(output_activation_max, 0);
TFLITE_DCHECK_LT(output_activation_max, 256);
} else {
TFLITE_DCHECK_GE(output_activation_min, -128);
TFLITE_DCHECK_LT(output_activation_min, 128);
TFLITE_DCHECK_GE(output_activation_max, -128);
TFLITE_DCHECK_LT(output_activation_max, 128);
}
TFLITE_DCHECK_GE(output_offset, -32878);
TFLITE_DCHECK_LT(output_offset, 32768);
TFLITE_DCHECK_GE(depth_micro_repeats, 1);
const int16x8_t output_offset_vec =
vdupq_n_s16(static_cast<int16>(output_offset));
const uint8x16_t output_activation_min_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_min));
const uint8x16_t output_activation_max_vec =
vdupq_n_u8(static_cast<uint8>(output_activation_max));
for (int j_depth = 0; j_depth < (depth_micro_repeats * 1 + 0); ++j_depth) {
int8x16_t filter_reg_0_a;
int8x16_t filter_reg_0_b;
int8x16_t filter_reg_1_a;
int8x16_t filter_reg_1_b;
int8x16_t filter_reg_2_a;
int8x16_t filter_reg_2_b;
filter_reg_0_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
const int32x4_t adjusted_bias_data_s_0 = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t adjusted_bias_data_s_1 = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
if (block_height == 2) {
const int8* scratch_data = scratch_block_data;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_block_data + 8 * j_depth;
int8x16_t input_bank_a_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_b_reg; // left 2, right 2, left 3, right 3.
int8x16_t input_bank_c_reg; // left 4, right 4, xxx, xxx.
// Load first sub-micro block of data into operational banks.
input_bank_a_reg =
vld1q_dup_s8x4(scratch_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_a_reg = vld1q_lane_8x4(
scratch_data + workspace_height_stride, input_bank_a_reg, 2);
input_bank_b_reg = vld1q_dup_s8x4(
scratch_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
input_bank_b_reg = vld1q_lane_8x4(
scratch_data + 3 * workspace_height_stride, input_bank_b_reg, 2);
input_bank_c_reg = vld1q_dup_s8x4(
scratch_data +
4 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
int32x4_t acc0;
int32x4_t acc1;
// When output_width_micro_repeats < output_width_overall_micro_repeats,
// 0 < residual_width <= 2, and so residual_width == 1 is then true iff
// residual_width < 2.
const int adjusted_width_micro_repeats =
(output_width_micro_repeats < output_width_overall_micro_repeats) &&
(residual_width < 2)
? output_width_micro_repeats
: output_width_overall_micro_repeats;
int i_width = 0;
for (; i_width < adjusted_width_micro_repeats; ++i_width) {
const int8* input_data = scratch_data + 4 + 4 * i_width;
// Load next sub-micro block of data.
input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg = vld1q_lane_8x4(
input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);
input_bank_b_reg = vld1q_lane_8x4(
input_data + 3 * workspace_height_stride, input_bank_b_reg, 3);
input_bank_c_reg = vld1q_lane_8x4(
input_data + 4 * workspace_height_stride, input_bank_c_reg, 1);
int16x8_t acc_s16_0_1;
uint8x8_t acc_u8_0_1;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = adjusted_bias_data_s_0;
acc1 = adjusted_bias_data_s_0;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);
acc0 = adjusted_bias_data_s_1;
acc1 = adjusted_bias_data_s_1;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1,
1);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
}
// output_width == four_over_stride.
acc0 = adjusted_bias_data_s_0;
acc1 = adjusted_bias_data_s_0;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 =
util_vmax_x8(acc_u8_0_1, vget_low_u8(output_activation_min_vec));
acc_u8_0_1 =
util_vmin_x8(acc_u8_0_1, vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);
acc0 = adjusted_bias_data_s_1;
acc1 = adjusted_bias_data_s_1;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 =
util_vmax_x8(acc_u8_0_1, vget_low_u8(output_activation_min_vec));
acc_u8_0_1 =
util_vmin_x8(acc_u8_0_1, vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1, 1);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
}
for (; i_width < output_width_overall_micro_repeats; ++i_width) {
// output_width == 1.
const int8* input_data = scratch_data + 4 + 4 * i_width;
// Load next sub-micro block of data.
input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg = vld1q_lane_8x4(
input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);
input_bank_b_reg = vld1q_lane_8x4(
input_data + 3 * workspace_height_stride, input_bank_b_reg, 3);
input_bank_c_reg = vld1q_lane_8x4(
input_data + 4 * workspace_height_stride, input_bank_c_reg, 1);
int16x8_t acc_s16_0_1;
uint8x8_t acc_u8_0_1;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = adjusted_bias_data_s_0;
acc1 = adjusted_bias_data_s_0;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);
acc0 = adjusted_bias_data_s_1;
acc1 = adjusted_bias_data_s_1;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);
// Fixed-point multiplication.
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1,
1);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
}
}
} else {
TFLITE_DCHECK_EQ(block_height, 1);
// Work through one slice, by row, at a time.
const int8* scratch_data = scratch_block_data;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_block_data + 8 * j_depth;
int8x16_t input_bank_a_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_b_reg; // left 2, right 2, xxx, xxx.
// Load first sub-micro block of data into operational banks.
input_bank_a_reg =
vld1q_dup_s8x4(scratch_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_a_reg = vld1q_lane_8x4(
scratch_data + workspace_height_stride, input_bank_a_reg, 2);
input_bank_b_reg = vld1q_dup_s8x4(
scratch_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
int32x4_t acc0;
int32x4_t acc1;
for (int i_width = 0; i_width < output_width_overall_micro_repeats;
++i_width) {
const int output_width =
i_width == output_width_micro_repeats ? residual_width : 2;
TFLITE_DCHECK_LE(output_width, 2);
TFLITE_DCHECK_GE(output_width, 1);
TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
const int8* input_data = scratch_data + 4 + 4 * i_width;
// Load next sub-micro block of data.
input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg = vld1q_lane_8x4(
input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);
int16x8_t acc_s16_0_1;
uint8x8_t acc_u8_0_1;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = adjusted_bias_data_s_0;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
// Second sub-block accumulation.
acc1 = adjusted_bias_data_s_1;
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_a_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_a_reg, 2);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
// This stores the results for both sub-blocks together.
util_vst1_x8(output_data, acc_u8_0_1);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
output_data += output_depth;
}
if (output_width == 2) {
acc0 = adjusted_bias_data_s_0;
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
acc0 =
vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
acc0 = vqrdmulhq_n_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc0, -output_shift);
// Second sub-block accumulation.
acc1 = adjusted_bias_data_s_1;
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_b_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_a_reg, 0);
acc1 =
vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_a_reg, 2);
acc1 = vqrdmulhq_n_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::Run(
acc1, -output_shift);
// Add the output offset.
acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
// Apply the activation function.
acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
vget_low_u8(output_activation_min_vec));
acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
vget_low_u8(output_activation_max_vec));
// This stores the results for both sub-blocks together.
util_vst1_x8(output_data, acc_u8_0_1);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
output_data += output_depth;
}
}
}
}
}
  // Thin dispatch wrapper: forwards the pre-transposed filter workspace,
  // per-macro-block scratch input and bias data straight to the intrinsics
  // implementation above. Exists so all KernelMacroBlock specializations
  // expose a uniform Run() entry point to the caller.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         uint8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
// Macro-block kernel: 3x3 depthwise convolution, per-channel int8
// quantization, no depth multiplication, stride 1, implemented with
// NEON-intrinsic emulation of the dot-product instructions.
//
// Input data is consumed from a pre-permuted scratch workspace
// (scratch_block_data) and filters from a pre-transposed filter workspace;
// results are requantized per channel and written as int8.
template <>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
    QuantizationType::kPerChannelInt8,
    DepthwiseConvDepthMultiplication::kNoMultiplication,
    /*stride=*/1> {
  // Signedness-abstraction helpers: the "x8" names let the kernel body be
  // shared between uint8 and int8 specializations. Here they narrow to /
  // operate on signed 8-bit lanes.
  static inline int8x8_t vqmovxn_s16(int16x8_t x) { return vqmovn_s16(x); }
  static inline int8x8_t util_vmin_x8(int8x8_t a, int8x8_t b) {
    return vmin_s8(a, b);
  }
  static inline int8x8_t util_vmax_x8(int8x8_t a, int8x8_t b) {
    return vmax_s8(a, b);
  }
  static inline int8x16_t util_vminq_x8(int8x16_t a, int8x16_t b) {
    return vminq_s8(a, b);
  }
  static inline int8x16_t util_vmaxq_x8(int8x16_t a, int8x16_t b) {
    return vmaxq_s8(a, b);
  }

  // Core kernel. Processes one macro block: depth_micro_repeats micro blocks
  // of 8 channels, each covering block_height output rows and
  // output_width_overall_micro_repeats width micro-repeats.
  static inline void KernelMacroBlockIntrinsics(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, int8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    static constexpr QuantizationType quantization_type =
        QuantizationType::kPerChannelInt8;

    // Geometry and quantization parameters for this macro block.
    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int input_width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->input_depth;

    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    // Bias values are consumed four channels (one int32x4) at a time.
    constexpr int kBiasIncrement = 4;

    TFLITE_DCHECK(depth_micro_repeats > 0);
    // One width micro-repeat in the scratch workspace is 4 positions x 8
    // channels of int8.
    const int width_micro_stride = 4 * 8;
    const int depth_micro_stride =
        width_micro_stride * input_width_overall_micro_repeats;

    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_offset = function_params->output_offset;
    const int32* output_shift_per_channel =
        function_params->output_shift_per_channel;
    const int32* output_multiplier_per_channel =
        function_params->output_multiplier_per_channel;
    // Sanity-check activation bounds for the quantization flavor. The
    // kNonPerChannelUint8 branch is dead here (quantization_type is fixed to
    // kPerChannelInt8) but kept for symmetry with the uint8 specialization.
    if (quantization_type == QuantizationType::kNonPerChannelUint8) {
      TFLITE_DCHECK_GE(output_activation_min, 0);
      TFLITE_DCHECK_LT(output_activation_min, 256);
      TFLITE_DCHECK_GE(output_activation_max, 0);
      TFLITE_DCHECK_LT(output_activation_max, 256);
    } else {
      TFLITE_DCHECK_GE(output_activation_min, -128);
      TFLITE_DCHECK_LT(output_activation_min, 128);
      TFLITE_DCHECK_GE(output_activation_max, -128);
      TFLITE_DCHECK_LT(output_activation_max, 128);
      TFLITE_DCHECK_NE(output_shift_per_channel, nullptr);
      TFLITE_DCHECK_NE(output_multiplier_per_channel, nullptr);
    }
    // NOTE(review): -32878 looks like a typo for -32768 (int16 min) — it
    // matches the other specializations in this file; confirm upstream before
    // changing.
    TFLITE_DCHECK_GE(output_offset, -32878);
    TFLITE_DCHECK_LT(output_offset, 32768);

    // Broadcast requantization constants into vector registers once.
    const int16x8_t output_offset_vec =
        vdupq_n_s16(static_cast<int16>(output_offset));
    const int8x16_t output_activation_min_vec =
        vdupq_n_s8(static_cast<int8>(output_activation_min));
    const int8x16_t output_activation_max_vec =
        vdupq_n_s8(static_cast<int8>(output_activation_max));

    const int8* input_data_depthwise = scratch_block_data;
    typename QuantizationTypeImpl<quantization_type>::ExternalType*
        output_data_depthwise = output_block_data;
    // Each j_depth iteration handles one micro block of 8 channels.
    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Simulate NEON-register transposition of subset of filter.
      // "_a" registers hold filters for channels 0-3, "_b" for channels 4-7;
      // rows 0/1/2 of the 3x3 filter map to filter_reg_{0,1,2}_*.
      int8x16_t filter_reg_0_a;
      int8x16_t filter_reg_0_b;
      int8x16_t filter_reg_1_a;
      int8x16_t filter_reg_1_b;
      int8x16_t filter_reg_2_a;
      int8x16_t filter_reg_2_b;
      int8x16_t filter_reg_0_a_shifted;
      int8x16_t filter_reg_1_a_shifted;
      int8x16_t filter_reg_2_a_shifted;

      filter_reg_0_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_0_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_1_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_1_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_2_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_2_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      // Byte-shift each 32-bit lane left by one element; the shifted filters
      // are used for odd output positions, paired with the rotated input
      // banks below.
      filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
      filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
      filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);

      if (block_height == 4) {
        // Fast path: a full 4-row output block. The two s iterations cover
        // channels 0-3 ("_a" filters) and 4-7 ("_b" filters, swapped into the
        // "_a" registers at the bottom of the loop).
        for (int s = 0; s < 2; ++s) {
          // Work through one slice, by row, at a time.
          const int8* input_data_base = input_data_depthwise + 2 * 8 * s;
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data_base = output_data_depthwise + 4 * s;

          const int8* next_input_data = input_data_base;
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data = output_data_base;

          const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);
          bias_data += kBiasIncrement;

          // Per-channel requantization parameters for this 4-channel subset.
          const int32x4_t output_shift =
              vld1q_s32(output_shift_per_channel + j_depth * 8 + 4 * s);
          const int32x4_t output_multiplier =
              vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4 * s);

          // Load first sub-micro block of data into operational banks.
          // Six rows of input are needed to produce four rows of 3x3 output.
          int8x16_t left_bank_0_reg = vld1q_s8(next_input_data);
          int8x16_t left_bank_1_reg =
              vld1q_s8(next_input_data + workspace_height_stride);
          int8x16_t left_bank_2_reg =
              vld1q_s8(next_input_data + 2 * workspace_height_stride);
          int8x16_t left_bank_3_reg =
              vld1q_s8(next_input_data + 3 * workspace_height_stride);
          int8x16_t left_bank_4_reg =
              vld1q_s8(next_input_data + 4 * workspace_height_stride);
          int8x16_t left_bank_5_reg =
              vld1q_s8(next_input_data + 5 * workspace_height_stride);

          // acc0..acc3 accumulate output rows 0..3 respectively.
          int32x4_t acc0;
          int32x4_t acc1;
          int32x4_t acc2;
          int32x4_t acc3;

          acc0 = adjusted_bias_data;
          acc1 = adjusted_bias_data;
          acc2 = adjusted_bias_data;
          acc3 = adjusted_bias_data;

          // Pre-accumulate the filter-row / input-row products that are also
          // reloaded at the bottom of each loop iteration (software
          // pipelining).
          acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
          acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
          acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
          acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);

          for (int i_width = 0; i_width < output_width_micro_repeats;
               ++i_width) {
            next_input_data += width_micro_stride;

            // Iterate over input width shifts within 4x4 blocks.
            // Output column 0: unshifted filters against the left banks.
            {
              acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              acc2 = vqrdmulhq_s32(acc2, output_multiplier);
              acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc2, output_shift);
              acc3 = vqrdmulhq_s32(acc3, output_multiplier);
              acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc3, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              int16x8_t acc_s16_2_3 =
                  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
              // Apply the activation function.
              int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
                                                 vqmovxn_s16(acc_s16_2_3));
              acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
              acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);

              // Store one 4-channel result per output row.
              vst1q_lane_8x4(output_data, acc_u8_all, 0);
              vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
              vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
                             2);
              vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
                             3);

              output_data += depth;
            }

            // Load next sub-micro block of data.
            int8x16_t right_bank_0_reg;
            int8x16_t right_bank_1_reg;
            int8x16_t right_bank_2_reg;
            int8x16_t right_bank_3_reg;
            int8x16_t right_bank_4_reg;
            int8x16_t right_bank_5_reg;

            // Loading of next block always valid.
            right_bank_0_reg = vld1q_s8(next_input_data);
            right_bank_1_reg =
                vld1q_s8(next_input_data + workspace_height_stride);
            right_bank_2_reg =
                vld1q_s8(next_input_data + 2 * workspace_height_stride);
            right_bank_3_reg =
                vld1q_s8(next_input_data + 3 * workspace_height_stride);
            right_bank_4_reg =
                vld1q_s8(next_input_data + 4 * workspace_height_stride);
            right_bank_5_reg =
                vld1q_s8(next_input_data + 5 * workspace_height_stride);

            // Output column 1: shifted filters against the same left banks,
            // then rotate/merge the banks for column 2.
            {
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc2 = adjusted_bias_data;
              acc3 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_0_a_shifted, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a_shifted, left_bank_1_reg);
              acc0 = vdotq_s32(acc0, filter_reg_2_a_shifted, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a_shifted, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a_shifted, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a_shifted, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_0_a_shifted, left_bank_2_reg);
              acc2 = vdotq_s32(acc2, filter_reg_1_a_shifted, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_2_a_shifted, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_0_a_shifted, left_bank_3_reg);
              acc3 = vdotq_s32(acc3, filter_reg_1_a_shifted, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_2_a_shifted, left_bank_5_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              acc2 = vqrdmulhq_s32(acc2, output_multiplier);
              acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc2, output_shift);
              acc3 = vqrdmulhq_s32(acc3, output_multiplier);
              acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc3, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              int16x8_t acc_s16_2_3 =
                  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
              // Apply the activation function.
              int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
                                                 vqmovxn_s16(acc_s16_2_3));
              acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
              acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);

              vst1q_lane_8x4(output_data, acc_u8_all, 0);
              vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
              vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
                             2);
              vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
                             3);

              // Advance the input window by two positions: 16-bit reverse
              // within 32-bit lanes plus a transpose merges the upper half of
              // each left bank with the lower half of the matching right bank.
              left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
              left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
              left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
              left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
              left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
              left_bank_5_reg = vrev32q_u16(left_bank_5_reg);
              vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
              vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
              vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
              vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
              vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);
              vtrn1_s8x2_in_place(&left_bank_5_reg, &right_bank_5_reg);

              output_data += depth;
            }

            // Output column 2: unshifted filters against the merged banks.
            {
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc2 = adjusted_bias_data;
              acc3 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
              acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
              acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
              acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              acc2 = vqrdmulhq_s32(acc2, output_multiplier);
              acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc2, output_shift);
              acc3 = vqrdmulhq_s32(acc3, output_multiplier);
              acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc3, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              int16x8_t acc_s16_2_3 =
                  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
              // Apply the activation function.
              int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
                                                 vqmovxn_s16(acc_s16_2_3));
              acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
              acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);

              vst1q_lane_8x4(output_data, acc_u8_all, 0);
              vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
              vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
                             2);
              vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
                             3);

              output_data += depth;
            }

            // Output column 3: shifted filters against the merged banks, then
            // promote the right banks to left for the next micro-repeat and
            // re-seed the pipelined accumulators.
            {
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc2 = adjusted_bias_data;
              acc3 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_0_a_shifted, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a_shifted, left_bank_1_reg);
              acc0 = vdotq_s32(acc0, filter_reg_2_a_shifted, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a_shifted, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a_shifted, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a_shifted, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_0_a_shifted, left_bank_2_reg);
              acc2 = vdotq_s32(acc2, filter_reg_1_a_shifted, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_2_a_shifted, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_0_a_shifted, left_bank_3_reg);
              acc3 = vdotq_s32(acc3, filter_reg_1_a_shifted, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_2_a_shifted, left_bank_5_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              acc2 = vqrdmulhq_s32(acc2, output_multiplier);
              acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc2, output_shift);
              acc3 = vqrdmulhq_s32(acc3, output_multiplier);
              acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc3, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              int16x8_t acc_s16_2_3 =
                  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
              // Apply the activation function.
              int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
                                                 vqmovxn_s16(acc_s16_2_3));
              acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
              acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);

              vst1q_lane_8x4(output_data, acc_u8_all, 0);
              vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
              vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
                             2);
              vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
                             3);

              left_bank_0_reg = right_bank_0_reg;
              left_bank_1_reg = right_bank_1_reg;
              left_bank_2_reg = right_bank_2_reg;
              left_bank_3_reg = right_bank_3_reg;
              left_bank_4_reg = right_bank_4_reg;
              left_bank_5_reg = right_bank_5_reg;

              output_data += depth;
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc2 = adjusted_bias_data;
              acc3 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
              acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
              acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
            }
          }

          // Tail: fewer than four remaining output positions in this row
          // block, produced one at a time with byte-granular bank rotation.
          if (residual_width > 0) {
            next_input_data += width_micro_stride;
            const int output_width = residual_width;

            // Load next sub-micro block of data.
            int8x16_t right_bank_0_reg;
            int8x16_t right_bank_1_reg;
            int8x16_t right_bank_2_reg;
            int8x16_t right_bank_3_reg;
            int8x16_t right_bank_4_reg;
            int8x16_t right_bank_5_reg;
            // Logic: (output_width - 1) * stride_val < 2.
            const bool no_right_block = output_width < 3;

            if (no_right_block) {
              // Only needed for sanitizer checks.
              right_bank_0_reg = vdupq_n_s8(0);
              right_bank_1_reg = vdupq_n_s8(0);
              right_bank_2_reg = vdupq_n_s8(0);
              right_bank_3_reg = vdupq_n_s8(0);
              right_bank_4_reg = vdupq_n_s8(0);
              right_bank_5_reg = vdupq_n_s8(0);
            } else {
              right_bank_0_reg = vld1q_s8(next_input_data);
              right_bank_1_reg =
                  vld1q_s8(next_input_data + workspace_height_stride);
              right_bank_2_reg =
                  vld1q_s8(next_input_data + 2 * workspace_height_stride);
              right_bank_3_reg =
                  vld1q_s8(next_input_data + 3 * workspace_height_stride);
              right_bank_4_reg =
                  vld1q_s8(next_input_data + 4 * workspace_height_stride);
              right_bank_5_reg =
                  vld1q_s8(next_input_data + 5 * workspace_height_stride);
            }

            // Iterate over input width shifts within 4x4 blocks.
            for (int x = 0; x < output_width; ++x) {
              acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_1_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_1_a, left_bank_3_reg);
              acc2 = vdotq_s32(acc2, filter_reg_2_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_1_a, left_bank_4_reg);
              acc3 = vdotq_s32(acc3, filter_reg_2_a, left_bank_5_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              acc2 = vqrdmulhq_s32(acc2, output_multiplier);
              acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc2, output_shift);
              acc3 = vqrdmulhq_s32(acc3, output_multiplier);
              acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc3, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              int16x8_t acc_s16_2_3 =
                  vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
              // Apply the activation function.
              int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
                                                 vqmovxn_s16(acc_s16_2_3));
              acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
              acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);

              vst1q_lane_8x4(output_data, acc_u8_all, 0);
              vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
              vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
                             2);
              vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
                             3);

              // Shift each bank left by one input position, pulling bytes in
              // from the corresponding right bank.
              biregister_rotate_8(&left_bank_0_reg, &right_bank_0_reg);
              biregister_rotate_8(&left_bank_1_reg, &right_bank_1_reg);
              biregister_rotate_8(&left_bank_2_reg, &right_bank_2_reg);
              biregister_rotate_8(&left_bank_3_reg, &right_bank_3_reg);
              biregister_rotate_8(&left_bank_4_reg, &right_bank_4_reg);
              biregister_rotate_8(&left_bank_5_reg, &right_bank_5_reg);

              output_data += depth;

              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;
              acc2 = adjusted_bias_data;
              acc3 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_2_reg);
              acc2 = vdotq_s32(acc2, filter_reg_0_a, left_bank_2_reg);
              acc3 = vdotq_s32(acc3, filter_reg_0_a, left_bank_3_reg);
            }
          }

          input_data_base += 4 * workspace_height_stride;
          output_data_base += 4 * output_height_stride;

          // Move to next sub-block: advance to second set of filters, to new
          // bias.
          filter_reg_0_a = filter_reg_0_b;
          filter_reg_1_a = filter_reg_1_b;
          filter_reg_2_a = filter_reg_2_b;
          filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
          filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
          filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
        }
      } else {
        // General path: fewer than four output rows. Processes one row at a
        // time, computing channels 0-3 ("_a") and 4-7 ("_b") together.
        const int8* input_data_base = input_data_depthwise;
        typename QuantizationTypeImpl<quantization_type>::ExternalType*
            output_data_base = output_data_depthwise;

        const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;
        const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;

        // Per-channel requantization parameters for the two 4-channel halves.
        const int32x4_t output_shift_a =
            vld1q_s32(output_shift_per_channel + j_depth * 8);
        const int32x4_t output_multiplier_a =
            vld1q_s32(output_multiplier_per_channel + j_depth * 8);
        const int32x4_t output_shift_b =
            vld1q_s32(output_shift_per_channel + j_depth * 8 + 4);
        const int32x4_t output_multiplier_b =
            vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4);

        for (int k_height = 0; k_height < block_height; ++k_height) {
          const int8* next_input_data = input_data_base;
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data = output_data_base;

          // Load first sub-micro block of data into operational banks.
          // Three input rows per output row; "_b" banks are the second set of
          // 8 channels, offset 16 bytes in the workspace.
          int8x16_t left_bank_0_reg_a = vld1q_s8(next_input_data);
          int8x16_t left_bank_1_reg_a =
              vld1q_s8(next_input_data + workspace_height_stride);
          int8x16_t left_bank_2_reg_a =
              vld1q_s8(next_input_data + 2 * workspace_height_stride);
          int8x16_t left_bank_0_reg_b = vld1q_s8(next_input_data + 16);
          int8x16_t left_bank_1_reg_b =
              vld1q_s8(next_input_data + workspace_height_stride + 16);
          int8x16_t left_bank_2_reg_b =
              vld1q_s8(next_input_data + 2 * workspace_height_stride + 16);

          for (int i_width = 0; i_width < output_width_overall_micro_repeats;
               ++i_width) {
            next_input_data += width_micro_stride;
            const int output_width =
                i_width == output_width_micro_repeats ? residual_width : 4;

            int8x16_t right_bank_0_reg_a;
            int8x16_t right_bank_1_reg_a;
            int8x16_t right_bank_2_reg_a;
            int8x16_t right_bank_0_reg_b;
            int8x16_t right_bank_1_reg_b;
            int8x16_t right_bank_2_reg_b;
            // Logic: (output_width - 1) * stride_val < 2.
            const bool no_right_block = output_width < 3;

            // Load next sub-micro block of data.
            if (no_right_block) {
              // Only needed for sanitizer checks.
              right_bank_0_reg_a = vdupq_n_s8(0);
              right_bank_1_reg_a = vdupq_n_s8(0);
              right_bank_2_reg_a = vdupq_n_s8(0);
              right_bank_0_reg_b = vdupq_n_s8(0);
              right_bank_1_reg_b = vdupq_n_s8(0);
              right_bank_2_reg_b = vdupq_n_s8(0);
            } else {
              right_bank_0_reg_a = vld1q_s8(next_input_data);
              right_bank_1_reg_a =
                  vld1q_s8(next_input_data + workspace_height_stride);
              right_bank_2_reg_a =
                  vld1q_s8(next_input_data + 2 * workspace_height_stride);
              right_bank_0_reg_b = vld1q_s8(next_input_data + 16);
              right_bank_1_reg_b =
                  vld1q_s8(next_input_data + workspace_height_stride + 16);
              right_bank_2_reg_b =
                  vld1q_s8(next_input_data + 2 * workspace_height_stride + 16);
            }

            // Iterate over input width shifts within 4x4 blocks.
            for (int x = 0; x < output_width; ++x) {
              // One dot product per filter row for each channel half.
              int32x4_t acc_a = adjusted_bias_data_a;
              int32x4_t acc_b = adjusted_bias_data_b;
              acc_a = vdotq_s32(acc_a, filter_reg_0_a, left_bank_0_reg_a);
              acc_a = vdotq_s32(acc_a, filter_reg_1_a, left_bank_1_reg_a);
              acc_a = vdotq_s32(acc_a, filter_reg_2_a, left_bank_2_reg_a);
              acc_b = vdotq_s32(acc_b, filter_reg_0_b, left_bank_0_reg_b);
              acc_b = vdotq_s32(acc_b, filter_reg_1_b, left_bank_1_reg_b);
              acc_b = vdotq_s32(acc_b, filter_reg_2_b, left_bank_2_reg_b);

              // Fixed-point multiplication.
              acc_a = vqrdmulhq_s32(acc_a, output_multiplier_a);
              acc_b = vqrdmulhq_s32(acc_b, output_multiplier_b);
              acc_a =
                  DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                      acc_a, output_shift_a);
              acc_b =
                  DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                      acc_b, output_shift_b);
              // Add the output offset.
              int16x8_t acc_s16_0_0 =
                  vcombine_s16(vqmovn_s32(acc_a), vqmovn_s32(acc_b));
              acc_s16_0_0 = vqaddq_s16(acc_s16_0_0, output_offset_vec);
              // Apply the activation function.
              int8x8_t acc_u8_0_0 = vqmovxn_s16(acc_s16_0_0);
              acc_u8_0_0 = util_vmax_x8(acc_u8_0_0,
                                        vget_low_s8(output_activation_min_vec));
              acc_u8_0_0 = util_vmin_x8(acc_u8_0_0,
                                        vget_low_s8(output_activation_max_vec));

              // All 8 channels of this output position stored at once.
              vst1_s8(output_data, acc_u8_0_0);

              biregister_rotate_8(&left_bank_0_reg_a, &right_bank_0_reg_a);
              biregister_rotate_8(&left_bank_1_reg_a, &right_bank_1_reg_a);
              biregister_rotate_8(&left_bank_2_reg_a, &right_bank_2_reg_a);
              biregister_rotate_8(&left_bank_0_reg_b, &right_bank_0_reg_b);
              biregister_rotate_8(&left_bank_1_reg_b, &right_bank_1_reg_b);
              biregister_rotate_8(&left_bank_2_reg_b, &right_bank_2_reg_b);

              output_data += depth;
            }
          }
          input_data_base += workspace_height_stride;
          output_data_base += output_height_stride;
        }
      }
      input_data_depthwise += depth_micro_stride;
      output_data_depthwise += 8;
    }
  }  // NOLINT(readability/fn_size) Manually unrolled.

  // Uniform entry point; forwards to the intrinsics implementation.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
// 3x3 depthwise convolution macro-block kernel for per-channel int8
// quantization, no depth multiplication, stride 2, using dot-product NEON
// intrinsics. Consumes pre-shuffled filter data (filter_workspace) and
// pre-arranged input (scratch_block_data), producing a block of up to
// 2 output rows x (4 * micro-repeats) output columns x 8 depth channels.
template <>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
    QuantizationType::kPerChannelInt8,
    DepthwiseConvDepthMultiplication::kNoMultiplication,
    /*stride=*/2> {
  // Saturating narrow of 16-bit accumulator lanes to 8-bit output lanes
  // (signed flavor for the int8 path).
  static inline int8x8_t vqmovxn_s16(int16x8_t x) { return vqmovn_s16(x); }
  // Lane-wise min/max on 8-lane int8 vectors; used to clamp against the
  // quantized activation bounds.
  static inline int8x8_t util_vmin_x8(int8x8_t a, int8x8_t b) {
    return vmin_s8(a, b);
  }
  static inline int8x8_t util_vmax_x8(int8x8_t a, int8x8_t b) {
    return vmax_s8(a, b);
  }

  // Core computation. Layout parameters (strides, micro-repeat counts,
  // residual widths) all come from function_params, which was filled in by
  // the packing stage.
  static inline void KernelMacroBlockIntrinsics(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, int8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    static constexpr QuantizationType quantization_type =
        QuantizationType::kPerChannelInt8;

    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int input_width_overall_micro_repeats =
        function_params->input_width_overall_micro_repeats;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int depth = function_params->input_depth;
    constexpr int kStrideVal = 2;
    constexpr int kFourOverStride = 2;
    TFLITE_DCHECK_EQ(function_params->stride, kStrideVal);
    TFLITE_DCHECK_EQ(function_params->four_over_stride, kFourOverStride);

    const int workspace_width_micro_repeats =
        function_params->workspace_width_micro_repeats;
    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    constexpr int kBiasIncrement = 4;

    TFLITE_DCHECK(depth_micro_repeats > 0);
    const int width_micro_stride = 4 * 8;
    const int depth_micro_stride =
        width_micro_stride * input_width_overall_micro_repeats;

    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_offset = function_params->output_offset;
    const int32* output_shift_per_channel =
        function_params->output_shift_per_channel;
    const int32* output_multiplier_per_channel =
        function_params->output_multiplier_per_channel;
    if (quantization_type == QuantizationType::kNonPerChannelUint8) {
      TFLITE_DCHECK_GE(output_activation_min, 0);
      TFLITE_DCHECK_LT(output_activation_min, 256);
      TFLITE_DCHECK_GE(output_activation_max, 0);
      TFLITE_DCHECK_LT(output_activation_max, 256);
    } else {
      TFLITE_DCHECK_GE(output_activation_min, -128);
      TFLITE_DCHECK_LT(output_activation_min, 128);
      TFLITE_DCHECK_GE(output_activation_max, -128);
      TFLITE_DCHECK_LT(output_activation_max, 128);
      TFLITE_DCHECK_NE(output_shift_per_channel, nullptr);
      TFLITE_DCHECK_NE(output_multiplier_per_channel, nullptr);
    }
    // The offset is narrowed to int16 below (vdupq_n_s16), so it must fit in
    // [-32768, 32768). Bug fix: lower bound was typo'd as -32878, which
    // admitted offsets that silently wrap on the int16 cast.
    TFLITE_DCHECK_GE(output_offset, -32768);
    TFLITE_DCHECK_LT(output_offset, 32768);

    // This version only does min/max on 64 bits.
    const int16x8_t output_offset_vec =
        vdupq_n_s16(static_cast<int16>(output_offset));
    const int8x8_t output_activation_min_vec =
        vdup_n_s8(static_cast<int8>(output_activation_min));
    const int8x8_t output_activation_max_vec =
        vdup_n_s8(static_cast<int8>(output_activation_max));

    // Filter data for one depth-micro-block: 2 sub-blocks * 3 rows * 4 * 4.
    constexpr int shuffled_filter_increment = 2 * 3 * 4 * 4;

    TFLITE_DCHECK_LE(block_height, 2);

    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      const int8* filter_block =
          filter_workspace + shuffled_filter_increment * j_depth;

      if (block_height == 2) {
        // Two output rows: process the two 4-channel sub-blocks (s) serially.
        for (int s = 0; s < 2; ++s) {
          // Simulate NEON-register transposition of subset of filter.
          int8x16_t filter_reg_0_a;
          int8x16_t filter_reg_1_a;
          int8x16_t filter_reg_2_a;

          filter_reg_0_a = vld1q_s8(filter_block + s * 16);
          filter_reg_1_a = vld1q_s8(filter_block + s * 16 + 32);
          filter_reg_2_a = vld1q_s8(filter_block + s * 16 + 64);

          const int8* scratch_data =
              scratch_block_data + depth_micro_stride * j_depth;
          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data = output_block_data + 8 * j_depth;
          const int8* input_data_0 = scratch_data + s * 2 * 8;

          const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);

          // Per-channel requantization parameters for this 4-channel group.
          const int32x4_t output_shift =
              vld1q_s32(output_shift_per_channel + j_depth * 8 + 4 * s);
          const int32x4_t output_multiplier =
              vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4 * s);

          // Load first sub-micro block of data into operational banks.
          int8x16_t left_bank_0_reg = vld1q_s8(input_data_0);
          int8x16_t left_bank_1_reg =
              vld1q_s8(input_data_0 + workspace_height_stride);
          int8x16_t left_bank_2_reg =
              vld1q_s8(input_data_0 + 2 * workspace_height_stride);
          int8x16_t left_bank_3_reg =
              vld1q_s8(input_data_0 + 3 * workspace_height_stride);
          int8x16_t left_bank_4_reg =
              vld1q_s8(input_data_0 + 4 * workspace_height_stride);

          int8x16_t right_bank_0_reg;
          int8x16_t right_bank_1_reg;
          int8x16_t right_bank_2_reg;
          int8x16_t right_bank_3_reg;
          int8x16_t right_bank_4_reg;

          int32x4_t acc0;
          int32x4_t acc1;
          int16x8_t acc_s16_0_1;
          int8x8_t acc_u8;

          int i_width = 0;

          // When output_width_micro_repeats <
          // output_width_overall_micro_repeats, 0 < residual_width <= 2, and
          // so residual_width == 1 is then true iff residual_width < 2.
          const int adjusted_width_micro_repeats =
              (output_width_micro_repeats <
               output_width_overall_micro_repeats) &&
                      (residual_width == 1)
                  ? output_width_micro_repeats
                  : output_width_overall_micro_repeats;

          // Main width loop: full micro-blocks producing 2 output columns.
          for (; i_width < adjusted_width_micro_repeats; ++i_width) {
            const int output_width = kFourOverStride;
            TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
            const int8* input_data =
                input_data_0 + width_micro_stride * i_width;
            acc0 = adjusted_bias_data;
            acc1 = adjusted_bias_data;
            right_bank_0_reg = vld1q_s8(input_data + width_micro_stride);
            right_bank_1_reg = vld1q_s8(input_data + width_micro_stride +
                                        workspace_height_stride);

            acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
            acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
            typename QuantizationTypeImpl<quantization_type>::ExternalType*
                output_data_base = output_data + depth * 2 * i_width + 4 * s;
            right_bank_2_reg = vld1q_s8(input_data + width_micro_stride +
                                        2 * workspace_height_stride);
            right_bank_3_reg = vld1q_s8(input_data + width_micro_stride +
                                        3 * workspace_height_stride);
            acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
            acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
            acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
            acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);
            right_bank_4_reg = vld1q_s8(input_data + width_micro_stride +
                                        4 * workspace_height_stride);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);

            // Shuffle banks so the next (stride-2) input position is in the
            // "left" registers, interleaving with the "right" block.
            left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
            left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
            left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
            left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
            left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
            acc0 = adjusted_bias_data;
            acc1 = adjusted_bias_data;
            vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
            vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
            vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
            vst1_lane_8x4(output_data_base, acc_u8, 0);
            vst1_lane_8x4(output_data_base + output_height_stride, acc_u8, 1);
            vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
            vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);

            acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
            acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
            acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
            acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
            acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
            acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);

            vst1_lane_8x4(output_data_base + depth, acc_u8, 0);
            vst1_lane_8x4(output_data_base + depth + output_height_stride,
                          acc_u8, 1);

            // The "right" block becomes the next iteration's "left" block.
            left_bank_0_reg = right_bank_0_reg;
            left_bank_1_reg = right_bank_1_reg;
            left_bank_2_reg = right_bank_2_reg;
            left_bank_3_reg = right_bank_3_reg;
            left_bank_4_reg = right_bank_4_reg;
          }
          // Residual width loop: at most one output column remains.
          for (; i_width < output_width_overall_micro_repeats; ++i_width) {
            TFLITE_DCHECK_NE(residual_width, kFourOverStride);

            // No need to load next ("right") block of data.

            typename QuantizationTypeImpl<quantization_type>::ExternalType*
                output_data_base = output_data + depth * 2 * i_width + 4 * s;

            // Iterate over input width shifts within 4x4 blocks.
            {
              acc0 = adjusted_bias_data;
              acc1 = adjusted_bias_data;

              acc0 = vdotq_s32(acc0, filter_reg_0_a, left_bank_0_reg);
              acc0 = vdotq_s32(acc0, filter_reg_1_a, left_bank_1_reg);
              acc0 = vdotq_s32(acc0, filter_reg_2_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_0_a, left_bank_2_reg);
              acc1 = vdotq_s32(acc1, filter_reg_1_a, left_bank_3_reg);
              acc1 = vdotq_s32(acc1, filter_reg_2_a, left_bank_4_reg);

              // Fixed-point multiplication.
              acc0 = vqrdmulhq_s32(acc0, output_multiplier);
              acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc0, output_shift);
              acc1 = vqrdmulhq_s32(acc1, output_multiplier);
              acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                  acc1, output_shift);
              // Add the output offset.
              int16x8_t acc_s16_0_1 =
                  vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
              acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
              // Apply the activation function.
              int8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
              acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
              acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);

              vst1_lane_8x4(output_data_base, acc_u8, 0);
              vst1_lane_8x4(output_data_base + output_height_stride, acc_u8,
                            1);

              left_bank_0_reg = vrev32q_u16(left_bank_0_reg);
              left_bank_1_reg = vrev32q_u16(left_bank_1_reg);
              left_bank_2_reg = vrev32q_u16(left_bank_2_reg);
              left_bank_3_reg = vrev32q_u16(left_bank_3_reg);
              left_bank_4_reg = vrev32q_u16(left_bank_4_reg);
              vtrn1_s8x2_in_place(&left_bank_0_reg, &right_bank_0_reg);
              vtrn1_s8x2_in_place(&left_bank_1_reg, &right_bank_1_reg);
              vtrn1_s8x2_in_place(&left_bank_2_reg, &right_bank_2_reg);
              vtrn1_s8x2_in_place(&left_bank_3_reg, &right_bank_3_reg);
              vtrn1_s8x2_in_place(&left_bank_4_reg, &right_bank_4_reg);
            }
          }
          // Advance to the bias values for the next 4-channel group.
          bias_data += kBiasIncrement;
        }
      } else {
        // block_height == 1: a single output row, but both 4-channel
        // sub-blocks (a and b) are processed together, 8 channels at a time.
        int8x16_t filter_reg_0_a;
        int8x16_t filter_reg_1_a;
        int8x16_t filter_reg_2_a;
        int8x16_t filter_reg_0_b;
        int8x16_t filter_reg_1_b;
        int8x16_t filter_reg_2_b;

        filter_reg_0_a = vld1q_s8(filter_block);
        filter_reg_1_a = vld1q_s8(filter_block + 32);
        filter_reg_2_a = vld1q_s8(filter_block + 64);
        filter_reg_0_b = vld1q_s8(filter_block + 16);
        filter_reg_1_b = vld1q_s8(filter_block + 16 + 32);
        filter_reg_2_b = vld1q_s8(filter_block + 16 + 64);

        const int8* scratch_data =
            scratch_block_data + depth_micro_stride * j_depth;
        typename QuantizationTypeImpl<quantization_type>::ExternalType*
            output_data = output_block_data + 8 * j_depth;
        const int8* input_data_0 = scratch_data;

        const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;
        const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
        bias_data += kBiasIncrement;

        // Per-channel requantization parameters for both 4-channel groups.
        const int32x4_t output_shift_a =
            vld1q_s32(output_shift_per_channel + j_depth * 8);
        const int32x4_t output_multiplier_a =
            vld1q_s32(output_multiplier_per_channel + j_depth * 8);
        const int32x4_t output_shift_b =
            vld1q_s32(output_shift_per_channel + j_depth * 8 + 4);
        const int32x4_t output_multiplier_b =
            vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4);

        // Load first sub-micro block of data into operational banks.
        int8x16_t left_bank_0_reg_a = vld1q_s8(input_data_0);
        int8x16_t left_bank_1_reg_a =
            vld1q_s8(input_data_0 + workspace_height_stride);
        int8x16_t left_bank_2_reg_a =
            vld1q_s8(input_data_0 + 2 * workspace_height_stride);
        int8x16_t left_bank_0_reg_b = vld1q_s8(input_data_0 + 16);
        int8x16_t left_bank_1_reg_b =
            vld1q_s8(input_data_0 + workspace_height_stride + 16);
        int8x16_t left_bank_2_reg_b =
            vld1q_s8(input_data_0 + 2 * workspace_height_stride + 16);

        int8x16_t right_bank_0_reg_a;
        int8x16_t right_bank_1_reg_a;
        int8x16_t right_bank_2_reg_a;
        int8x16_t right_bank_0_reg_b;
        int8x16_t right_bank_1_reg_b;
        int8x16_t right_bank_2_reg_b;

        int32x4_t acc0_a;
        int32x4_t acc0_b;

        for (int i_width = 0; i_width < output_width_overall_micro_repeats;
             ++i_width) {
          const int output_width = i_width == output_width_micro_repeats
                                       ? residual_width
                                       : kFourOverStride;
          TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
          const int8* input_data = input_data_0 + width_micro_stride * i_width;
          const bool no_right_block = i_width == output_width_micro_repeats &&
                                      output_width_overall_micro_repeats ==
                                          workspace_width_micro_repeats;

          if (!no_right_block) {
            // Load next sub-micro block of data.
            right_bank_0_reg_a = vld1q_s8(input_data + width_micro_stride);
            right_bank_1_reg_a = vld1q_s8(input_data + width_micro_stride +
                                          workspace_height_stride);
            right_bank_2_reg_a = vld1q_s8(input_data + width_micro_stride +
                                          2 * workspace_height_stride);
            right_bank_0_reg_b = vld1q_s8(input_data + width_micro_stride + 16);
            right_bank_1_reg_b = vld1q_s8(input_data + width_micro_stride +
                                          workspace_height_stride + 16);
            right_bank_2_reg_b = vld1q_s8(input_data + width_micro_stride +
                                          2 * workspace_height_stride + 16);
          }

          typename QuantizationTypeImpl<quantization_type>::ExternalType*
              output_data_base = output_data + depth * 2 * i_width;

          // Iterate over input width shifts within 4x4 blocks.
          {
            acc0_a = adjusted_bias_data_a;
            acc0_b = adjusted_bias_data_b;

            acc0_a = vdotq_s32(acc0_a, filter_reg_0_a, left_bank_0_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_1_a, left_bank_1_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_2_a, left_bank_2_reg_a);
            acc0_b = vdotq_s32(acc0_b, filter_reg_0_b, left_bank_0_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_1_b, left_bank_1_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_2_b, left_bank_2_reg_b);

            // Fixed-point multiplication.
            acc0_a = vqrdmulhq_s32(acc0_a, output_multiplier_a);
            acc0_b = vqrdmulhq_s32(acc0_b, output_multiplier_b);
            acc0_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0_a, output_shift_a);
            acc0_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0_b, output_shift_b);
            // Add the output offset.
            int16x8_t acc_s16_0_1 =
                vcombine_s16(vqmovn_s32(acc0_a), vqmovn_s32(acc0_b));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            int8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);

            // Full 8-channel store for the single output row.
            vst1_s8(output_data_base, acc_u8);

            left_bank_0_reg_a = vrev32q_u16(left_bank_0_reg_a);
            left_bank_1_reg_a = vrev32q_u16(left_bank_1_reg_a);
            left_bank_2_reg_a = vrev32q_u16(left_bank_2_reg_a);
            left_bank_0_reg_b = vrev32q_u16(left_bank_0_reg_b);
            left_bank_1_reg_b = vrev32q_u16(left_bank_1_reg_b);
            left_bank_2_reg_b = vrev32q_u16(left_bank_2_reg_b);
            vtrn1_s8x2_in_place(&left_bank_0_reg_a, &right_bank_0_reg_a);
            vtrn1_s8x2_in_place(&left_bank_1_reg_a, &right_bank_1_reg_a);
            vtrn1_s8x2_in_place(&left_bank_2_reg_a, &right_bank_2_reg_a);
            vtrn1_s8x2_in_place(&left_bank_0_reg_b, &right_bank_0_reg_b);
            vtrn1_s8x2_in_place(&left_bank_1_reg_b, &right_bank_1_reg_b);
            vtrn1_s8x2_in_place(&left_bank_2_reg_b, &right_bank_2_reg_b);
          }

          if (output_width > 1) {
            acc0_a = adjusted_bias_data_a;
            acc0_b = adjusted_bias_data_b;

            acc0_a = vdotq_s32(acc0_a, filter_reg_0_a, left_bank_0_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_1_a, left_bank_1_reg_a);
            acc0_a = vdotq_s32(acc0_a, filter_reg_2_a, left_bank_2_reg_a);
            acc0_b = vdotq_s32(acc0_b, filter_reg_0_b, left_bank_0_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_1_b, left_bank_1_reg_b);
            acc0_b = vdotq_s32(acc0_b, filter_reg_2_b, left_bank_2_reg_b);

            // Fixed-point multiplication.
            acc0_a = vqrdmulhq_s32(acc0_a, output_multiplier_a);
            acc0_b = vqrdmulhq_s32(acc0_b, output_multiplier_b);
            acc0_a = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0_a, output_shift_a);
            acc0_b = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0_b, output_shift_b);
            // Add the output offset.
            int16x8_t acc_s16_0_1 =
                vcombine_s16(vqmovn_s32(acc0_a), vqmovn_s32(acc0_b));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            int8x8_t acc_u8 = vqmovxn_s16(acc_s16_0_1);
            acc_u8 = util_vmax_x8(acc_u8, output_activation_min_vec);
            acc_u8 = util_vmin_x8(acc_u8, output_activation_max_vec);

            vst1_s8(output_data_base + depth, acc_u8);

            // The "right" block becomes the next iteration's "left" block.
            left_bank_0_reg_a = right_bank_0_reg_a;
            left_bank_1_reg_a = right_bank_1_reg_a;
            left_bank_2_reg_a = right_bank_2_reg_a;
            left_bank_0_reg_b = right_bank_0_reg_b;
            left_bank_1_reg_b = right_bank_1_reg_b;
            left_bank_2_reg_b = right_bank_2_reg_b;
          }
        }
      }
    }
  }  // NOLINT(readability/fn_size) Manually unrolled.

  // Public entry point: forwards to the intrinsics implementation above.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
template <>
struct KernelMacroBlock<
DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
QuantizationType::kPerChannelInt8,
DepthwiseConvDepthMultiplication::kUnitInputDepth,
/*stride=*/1> {
  // Saturating narrow of 16-bit accumulator lanes to 8-bit lanes (signed
  // variant for this int8 specialization).
  static inline int8x8_t vqmovxn_s16(int16x8_t x) { return vqmovn_s16(x); }
  // Lane-wise minimum of two 8-lane int8 vectors.
  static inline int8x8_t util_vmin_x8(int8x8_t a, int8x8_t b) {
    return vmin_s8(a, b);
  }
  // Lane-wise maximum of two 8-lane int8 vectors.
  static inline int8x8_t util_vmax_x8(int8x8_t a, int8x8_t b) {
    return vmax_s8(a, b);
  }
  // Lane-wise minimum of two 16-lane int8 vectors (quad-register form).
  static inline int8x16_t util_vminq_x8(int8x16_t a, int8x16_t b) {
    return vminq_s8(a, b);
  }
  // Lane-wise maximum of two 16-lane int8 vectors (quad-register form).
  static inline int8x16_t util_vmaxq_x8(int8x16_t a, int8x16_t b) {
    return vmaxq_s8(a, b);
  }
static inline void KernelMacroBlockIntrinsics(
const int8* scratch_block_data, const int8* filter_workspace,
const int32* bias_data, int8* output_block_data,
const DepthwiseConvDotProdParams* function_params) {
static constexpr QuantizationType quantization_type =
QuantizationType::kPerChannelInt8;
TFLITE_DCHECK_EQ(function_params->stride, 1);
const int workspace_height_stride =
function_params->workspace_height_stride;
const int output_width_micro_repeats =
function_params->output_width_micro_repeats;
const int depth_micro_repeats = function_params->depth_micro_repeats;
const int output_depth = function_params->output_depth;
const int output_width_overall_micro_repeats =
function_params->output_width_overall_micro_repeats;
const int block_height = function_params->outbound_block_height;
const int residual_width = function_params->output_residual_width;
const int output_height_stride = function_params->output_height_stride;
constexpr int kBiasIncrement = 4;
TFLITE_DCHECK(depth_micro_repeats > 0);
const int32 output_activation_min =
function_params->quantized_activation_min;
const int32 output_activation_max =
function_params->quantized_activation_max;
const int32 output_offset = function_params->output_offset;
const int32* output_shift_per_channel =
function_params->output_shift_per_channel;
const int32* output_multiplier_per_channel =
function_params->output_multiplier_per_channel;
if (quantization_type == QuantizationType::kNonPerChannelUint8) {
TFLITE_DCHECK_GE(output_activation_min, 0);
TFLITE_DCHECK_LT(output_activation_min, 256);
TFLITE_DCHECK_GE(output_activation_max, 0);
TFLITE_DCHECK_LT(output_activation_max, 256);
} else {
TFLITE_DCHECK_GE(output_activation_min, -128);
TFLITE_DCHECK_LT(output_activation_min, 128);
TFLITE_DCHECK_GE(output_activation_max, -128);
TFLITE_DCHECK_LT(output_activation_max, 128);
TFLITE_DCHECK_NE(output_shift_per_channel, nullptr);
TFLITE_DCHECK_NE(output_multiplier_per_channel, nullptr);
}
TFLITE_DCHECK_GE(output_offset, -32878);
TFLITE_DCHECK_LT(output_offset, 32768);
const int16x8_t output_offset_vec =
vdupq_n_s16(static_cast<int16>(output_offset));
const int8x16_t output_activation_min_vec =
vdupq_n_s8(static_cast<int8>(output_activation_min));
const int8x16_t output_activation_max_vec =
vdupq_n_s8(static_cast<int8>(output_activation_max));
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_depthwise = output_block_data;
for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
// Simulate NEON-register transposition of subset of filter.
int8x16_t filter_reg_0_a;
int8x16_t filter_reg_0_b;
int8x16_t filter_reg_1_a;
int8x16_t filter_reg_1_b;
int8x16_t filter_reg_2_a;
int8x16_t filter_reg_2_b;
int8x16_t filter_reg_0_a_shifted;
int8x16_t filter_reg_1_a_shifted;
int8x16_t filter_reg_2_a_shifted;
filter_reg_0_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_1_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_a = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_2_b = vld1q_s8(filter_workspace);
filter_workspace += 16;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
// When output_width_micro_repeats < output_width_overall_micro_repeats,
// 0 < residual_width <= 2, and so residual_width == 1 is then true iff
// residual_width < 2.
const int adjusted_width_micro_repeats =
(output_width_micro_repeats < output_width_overall_micro_repeats) &&
(residual_width < 4)
? output_width_micro_repeats
: output_width_overall_micro_repeats;
if (block_height == 4) {
for (int s = 0; s < 2; ++s) {
// Work through one slice, by row, at a time.
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise + 4 * s;
const int8* next_input_data = scratch_block_data;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
const int32x4_t adjusted_bias_data = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t output_shift =
vld1q_s32(output_shift_per_channel + j_depth * 8 + 4 * s);
const int32x4_t output_multiplier =
vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4 * s);
int8x16_t input_bank_a_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_b_reg; // left 2, right 2, left 3, right 3.
int8x16_t input_bank_c_reg; // left 4, right 4, left 5, right 5.
// Load first sub-micro block of data into operational banks.
input_bank_a_reg =
vld1q_dup_s8x4(next_input_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 2);
input_bank_b_reg = vld1q_dup_s8x4(
next_input_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 2);
input_bank_c_reg = vld1q_dup_s8x4(
next_input_data +
4 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 2);
int32x4_t acc0;
int32x4_t acc1;
int32x4_t acc2;
int32x4_t acc3;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg, 2);
int i_width = 0;
for (; i_width < adjusted_width_micro_repeats; ++i_width) {
next_input_data += 4;
// Iterate over input width shifts within 4x4 blocks.
{
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc0, output_shift);
acc1 = vqrdmulhq_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc1, output_shift);
acc2 = vqrdmulhq_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc2, output_shift);
acc3 = vqrdmulhq_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc3, output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += output_depth;
}
// Load next sub-micro block of data.
input_bank_a_reg =
vld1q_lane_8x4(next_input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_b_reg, 1);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 3);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 4 * workspace_height_stride,
input_bank_c_reg, 1);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 3);
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a_shifted,
input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a_shifted,
input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a_shifted,
input_bank_a_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a_shifted,
input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a_shifted,
input_bank_b_reg, 2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a_shifted,
input_bank_c_reg, 2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc0, output_shift);
acc1 = vqrdmulhq_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc1, output_shift);
acc2 = vqrdmulhq_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc2, output_shift);
acc3 = vqrdmulhq_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc3, output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc0, output_shift);
acc1 = vqrdmulhq_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc1, output_shift);
acc2 = vqrdmulhq_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc2, output_shift);
acc3 = vqrdmulhq_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc3, output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
output_data += output_depth;
}
{
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a_shifted,
input_bank_a_reg, 0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a_shifted,
input_bank_a_reg, 2);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a_shifted,
input_bank_a_reg, 2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a_shifted,
input_bank_b_reg, 0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a_shifted,
input_bank_b_reg, 0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a_shifted,
input_bank_b_reg, 2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a_shifted,
input_bank_b_reg, 2);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a_shifted,
input_bank_c_reg, 0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a_shifted,
input_bank_c_reg, 2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc0, output_shift);
acc1 = vqrdmulhq_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc1, output_shift);
acc2 = vqrdmulhq_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc2, output_shift);
acc3 = vqrdmulhq_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc3, output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);
output_data += output_depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
}
}
if (i_width < output_width_overall_micro_repeats) {
next_input_data += 4;
const int output_width = residual_width;
// Load next sub-micro block of data.
input_bank_a_reg =
vld1q_lane_8x4(next_input_data, input_bank_a_reg, 1);
input_bank_a_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_a_reg, 3);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_b_reg, 1);
input_bank_b_reg =
vld1q_lane_8x4(next_input_data + 3 * workspace_height_stride,
input_bank_b_reg, 3);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 4 * workspace_height_stride,
input_bank_c_reg, 1);
input_bank_c_reg =
vld1q_lane_8x4(next_input_data + 5 * workspace_height_stride,
input_bank_c_reg, 3);
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg,
0);
acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_a_reg,
2);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_1_a, input_bank_b_reg,
2);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_2_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_1_a, input_bank_c_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_2_a, input_bank_c_reg,
2);
// Fixed-point multiplication.
acc0 = vqrdmulhq_s32(acc0, output_multiplier);
acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc0, output_shift);
acc1 = vqrdmulhq_s32(acc1, output_multiplier);
acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc1, output_shift);
acc2 = vqrdmulhq_s32(acc2, output_multiplier);
acc2 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc2, output_shift);
acc3 = vqrdmulhq_s32(acc3, output_multiplier);
acc3 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc3, output_shift);
// Add the output offset.
int16x8_t acc_s16_0_1 =
vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
int16x8_t acc_s16_2_3 =
vcombine_s16(vqmovn_s32(acc2), vqmovn_s32(acc3));
acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
acc_s16_2_3 = vqaddq_s16(acc_s16_2_3, output_offset_vec);
// Apply the activation function.
int8x16_t acc_u8_all = vcombine_u8(vqmovxn_s16(acc_s16_0_1),
vqmovxn_s16(acc_s16_2_3));
acc_u8_all = util_vmaxq_x8(acc_u8_all, output_activation_min_vec);
acc_u8_all = util_vminq_x8(acc_u8_all, output_activation_max_vec);
vst1q_lane_8x4(output_data, acc_u8_all, 0);
vst1q_lane_8x4(output_data + output_height_stride, acc_u8_all, 1);
vst1q_lane_8x4(output_data + 2 * output_height_stride, acc_u8_all,
2);
vst1q_lane_8x4(output_data + 3 * output_height_stride, acc_u8_all,
3);
input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 8);
input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 8);
input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 8);
output_data += output_depth;
acc0 = adjusted_bias_data;
acc1 = adjusted_bias_data;
acc2 = adjusted_bias_data;
acc3 = adjusted_bias_data;
acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg,
0);
acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg,
0);
acc2 = vdotq_four_lane_s32(acc2, filter_reg_0_a, input_bank_b_reg,
0);
acc3 = vdotq_four_lane_s32(acc3, filter_reg_0_a, input_bank_b_reg,
2);
}
}
// scratch_block_data += 4 * workspace_height_stride;
output_data_base += 4 * output_height_stride;
// Move to next sub-block: advance to second set of filters, to new
// bias.
filter_reg_0_a = filter_reg_0_b;
filter_reg_1_a = filter_reg_1_b;
filter_reg_2_a = filter_reg_2_b;
filter_reg_0_a_shifted = vshlq_n_u32(filter_reg_0_a, 8);
filter_reg_1_a_shifted = vshlq_n_u32(filter_reg_1_a, 8);
filter_reg_2_a_shifted = vshlq_n_u32(filter_reg_2_a, 8);
}
} else {
// Block height < 4.
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data_base = output_data_depthwise;
const int32x4_t adjusted_bias_data_a = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t adjusted_bias_data_b = vld1q_s32(bias_data);
bias_data += kBiasIncrement;
const int32x4_t output_shift_a =
vld1q_s32(output_shift_per_channel + j_depth * 8);
const int32x4_t output_multiplier_a =
vld1q_s32(output_multiplier_per_channel + j_depth * 8);
const int32x4_t output_shift_b =
vld1q_s32(output_shift_per_channel + j_depth * 8 + 4);
const int32x4_t output_multiplier_b =
vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4);
for (int k_height = 0; k_height < block_height; ++k_height) {
const int8* next_input_data =
scratch_block_data + k_height * workspace_height_stride;
typename QuantizationTypeImpl<quantization_type>::ExternalType*
output_data = output_data_base;
int8x16_t input_bank_p_reg; // left 0, right 0, left 1, right 1.
int8x16_t input_bank_q_reg; // left 2, right 2, left 3, right 3.
// Load first sub-micro block of data into operational banks.
input_bank_p_reg =
vld1q_dup_s8x4(next_input_data); // Load lane 0, avoiding
// uninitialized variable.
input_bank_p_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_p_reg, 2);
input_bank_q_reg = vld1q_dup_s8x4(
next_input_data +
2 * workspace_height_stride); // Load lane 0, avoiding
// uninitialized variable.
for (int i_width = 0; i_width < output_width_overall_micro_repeats;
++i_width) {
next_input_data += 4;
const int output_width =
i_width == output_width_micro_repeats ? residual_width : 4;
// Load next sub-micro block of data.
input_bank_p_reg =
vld1q_lane_8x4(next_input_data, input_bank_p_reg, 1);
input_bank_p_reg = vld1q_lane_8x4(
next_input_data + workspace_height_stride, input_bank_p_reg, 3);
input_bank_q_reg =
vld1q_lane_8x4(next_input_data + 2 * workspace_height_stride,
input_bank_q_reg, 1);
// Iterate over input width shifts within 4x4 blocks.
for (int x = 0; x < output_width; ++x) {
int32x4_t acc_a = adjusted_bias_data_a;
int32x4_t acc_b = adjusted_bias_data_b;
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_0_a,
input_bank_p_reg, 0);
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_1_a,
input_bank_p_reg, 2);
acc_a = vdotq_four_lane_s32(acc_a, filter_reg_2_a,
input_bank_q_reg, 0);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_0_b,
input_bank_p_reg, 0);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_1_b,
input_bank_p_reg, 2);
acc_b = vdotq_four_lane_s32(acc_b, filter_reg_2_b,
input_bank_q_reg, 0);
// Fixed-point multiplication.
acc_a = vqrdmulhq_s32(acc_a, output_multiplier_a);
acc_b = vqrdmulhq_s32(acc_b, output_multiplier_b);
acc_a =
DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc_a, output_shift_a);
acc_b =
DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
acc_b, output_shift_b);
// Add the output offset.
int16x8_t acc_s16_0_0 =
vcombine_s16(vqmovn_s32(acc_a), vqmovn_s32(acc_b));
acc_s16_0_0 = vqaddq_s16(acc_s16_0_0, output_offset_vec);
// Apply the activation function.
int8x8_t acc_u8_0_0 = vqmovxn_s16(acc_s16_0_0);
acc_u8_0_0 = util_vmax_x8(acc_u8_0_0,
vget_low_s8(output_activation_min_vec));
acc_u8_0_0 = util_vmin_x8(acc_u8_0_0,
vget_low_s8(output_activation_max_vec));
vst1_s8(output_data, acc_u8_0_0);
input_bank_p_reg = vshrq_n_u64(input_bank_p_reg, 8);
input_bank_q_reg = vshrq_n_u64(input_bank_q_reg, 8);
output_data += output_depth;
}
}
output_data_base += output_height_stride;
}
}
output_data_depthwise += 8;
}
} // NOLINT(readability/fn_size) Manually unrolled.
  // Public entry point required by the KernelMacroBlock interface.
  // Simply forwards all arguments to the intrinsics implementation above;
  // no additional work is done here.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
// 3x3 depthwise convolution kernel using NEON dot-product intrinsics, for
// per-channel int8 quantization, unit input-depth multiplication, and
// stride 2. Processes one "macro block" of pre-shuffled scratch data into
// the output tensor.
template <>
struct KernelMacroBlock<
    DepthwiseConvImplementation::kUseIntrinsics3x3DotProduct,
    QuantizationType::kPerChannelInt8,
    DepthwiseConvDepthMultiplication::kUnitInputDepth,
    /*stride=*/2> {
  // Saturating narrow int16 -> int8; the "x" variants abstract over
  // signed/unsigned so the main body is shared across quantization types.
  static inline int8x8_t vqmovxn_s16(int16x8_t x) { return vqmovn_s16(x); }
  static inline int8x8_t util_vmin_x8(int8x8_t a, int8x8_t b) {
    return vmin_s8(a, b);
  }
  static inline int8x8_t util_vmax_x8(int8x8_t a, int8x8_t b) {
    return vmax_s8(a, b);
  }

  // Core computation. scratch_block_data holds input rows laid out by the
  // packing stage (workspace_height_stride apart); filter_workspace holds
  // the shuffled filters (two 4-channel sub-blocks per depth micro-repeat);
  // bias_data supplies per-channel adjusted biases; output is written to
  // output_block_data. All geometry comes from function_params.
  static inline void KernelMacroBlockIntrinsics(
      const int8* scratch_block_data, const int8* filter_workspace,
      const int32* bias_data, int8* output_block_data,
      const DepthwiseConvDotProdParams* function_params) {
    static constexpr QuantizationType quantization_type =
        QuantizationType::kPerChannelInt8;

    const int workspace_height_stride =
        function_params->workspace_height_stride;
    const int output_width_micro_repeats =
        function_params->output_width_micro_repeats;
    const int depth_micro_repeats = function_params->depth_micro_repeats;
    const int output_depth = function_params->output_depth;
    constexpr int kStrideVal = 2;
    TFLITE_DCHECK_EQ(function_params->stride, kStrideVal);

    const int output_width_overall_micro_repeats =
        function_params->output_width_overall_micro_repeats;
    const int block_height = function_params->outbound_block_height;
    const int residual_width = function_params->output_residual_width;
    const int output_height_stride = function_params->output_height_stride;
    constexpr int kBiasIncrement = 4;

    const int32 output_activation_min =
        function_params->quantized_activation_min;
    const int32 output_activation_max =
        function_params->quantized_activation_max;
    const int32 output_offset = function_params->output_offset;
    const int32* output_shift_per_channel =
        function_params->output_shift_per_channel;
    const int32* output_multiplier_per_channel =
        function_params->output_multiplier_per_channel;
    // quantization_type is a compile-time constant here (kPerChannelInt8),
    // so only the else-branch checks are active; the structure is kept to
    // mirror the sibling specializations.
    if (quantization_type == QuantizationType::kNonPerChannelUint8) {
      TFLITE_DCHECK_GE(output_activation_min, 0);
      TFLITE_DCHECK_LT(output_activation_min, 256);
      TFLITE_DCHECK_GE(output_activation_max, 0);
      TFLITE_DCHECK_LT(output_activation_max, 256);
    } else {
      TFLITE_DCHECK_GE(output_activation_min, -128);
      TFLITE_DCHECK_LT(output_activation_min, 128);
      TFLITE_DCHECK_GE(output_activation_max, -128);
      TFLITE_DCHECK_LT(output_activation_max, 128);
      TFLITE_DCHECK_NE(output_shift_per_channel, nullptr);
      TFLITE_DCHECK_NE(output_multiplier_per_channel, nullptr);
    }
    // output_offset must fit in int16 (it is broadcast below via
    // vdupq_n_s16). Lower bound fixed from the typo'd -32878 to the int16
    // minimum, matching the 32768 upper bound on the next line.
    TFLITE_DCHECK_GE(output_offset, -32768);
    TFLITE_DCHECK_LT(output_offset, 32768);

    TFLITE_DCHECK_GE(depth_micro_repeats, 1);

    const int16x8_t output_offset_vec =
        vdupq_n_s16(static_cast<int16>(output_offset));
    const int8x16_t output_activation_min_vec =
        vdupq_n_s8(static_cast<int8>(output_activation_min));
    const int8x16_t output_activation_max_vec =
        vdupq_n_s8(static_cast<int8>(output_activation_max));

    for (int j_depth = 0; j_depth < depth_micro_repeats; ++j_depth) {
      // Each depth micro-repeat covers 8 channels, split into two 4-channel
      // sub-blocks "a" and "b", each with three filter rows.
      int8x16_t filter_reg_0_a;
      int8x16_t filter_reg_0_b;
      int8x16_t filter_reg_1_a;
      int8x16_t filter_reg_1_b;
      int8x16_t filter_reg_2_a;
      int8x16_t filter_reg_2_b;

      filter_reg_0_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_0_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_1_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_1_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_2_a = vld1q_s8(filter_workspace);
      filter_workspace += 16;
      filter_reg_2_b = vld1q_s8(filter_workspace);
      filter_workspace += 16;

      const int32x4_t adjusted_bias_data_s_0 = vld1q_s32(bias_data);
      bias_data += kBiasIncrement;
      const int32x4_t adjusted_bias_data_s_1 = vld1q_s32(bias_data);
      bias_data += kBiasIncrement;

      // Per-channel requantization parameters for the two 4-channel
      // sub-blocks of this depth micro-repeat.
      const int32x4_t output_shift_s_0 =
          vld1q_s32(output_shift_per_channel + j_depth * 8);
      const int32x4_t output_multiplier_s_0 =
          vld1q_s32(output_multiplier_per_channel + j_depth * 8);
      const int32x4_t output_shift_s_1 =
          vld1q_s32(output_shift_per_channel + j_depth * 8 + 4);
      const int32x4_t output_multiplier_s_1 =
          vld1q_s32(output_multiplier_per_channel + j_depth * 8 + 4);

      if (block_height == 2) {
        const int8* scratch_data = scratch_block_data;
        typename QuantizationTypeImpl<quantization_type>::ExternalType*
            output_data = output_block_data + 8 * j_depth;

        int8x16_t input_bank_a_reg;  //  left 0, right 0, left 1, right 1.
        int8x16_t input_bank_b_reg;  //  left 2, right 2, left 3, right 3.
        int8x16_t input_bank_c_reg;  //  left 4, right 4, xxx, xxx.

        // Load first sub-micro block of data into operational banks.
        input_bank_a_reg =
            vld1q_dup_s8x4(scratch_data);  // Load lane 0, avoiding
                                           // uninitialized variable.
        input_bank_a_reg = vld1q_lane_8x4(
            scratch_data + workspace_height_stride, input_bank_a_reg, 2);
        input_bank_b_reg = vld1q_dup_s8x4(
            scratch_data +
            2 * workspace_height_stride);  // Load lane 0, avoiding
                                           // uninitialized variable.
        input_bank_b_reg = vld1q_lane_8x4(
            scratch_data + 3 * workspace_height_stride, input_bank_b_reg, 2);
        input_bank_c_reg = vld1q_dup_s8x4(
            scratch_data +
            4 * workspace_height_stride);  // Load lane 0, avoiding
                                           // uninitialized variable.

        int32x4_t acc0;
        int32x4_t acc1;

        // When output_width_micro_repeats <
        // output_width_overall_micro_repeats, 0 < residual_width <= 2, and
        // so residual_width == 1 is then true iff residual_width < 2.
        const int adjusted_width_micro_repeats =
            (output_width_micro_repeats <
             output_width_overall_micro_repeats) &&
                    (residual_width < 2)
                ? output_width_micro_repeats
                : output_width_overall_micro_repeats;

        int i_width = 0;
        // Full-width micro blocks: two stride-2 output positions per block.
        for (; i_width < adjusted_width_micro_repeats; ++i_width) {
          const int8* input_data = scratch_data + 4 + 4 * i_width;

          // Load next sub-micro block of data.
          input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
          input_bank_a_reg = vld1q_lane_8x4(
              input_data + workspace_height_stride, input_bank_a_reg, 3);
          input_bank_b_reg = vld1q_lane_8x4(
              input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);
          input_bank_b_reg = vld1q_lane_8x4(
              input_data + 3 * workspace_height_stride, input_bank_b_reg, 3);
          input_bank_c_reg = vld1q_lane_8x4(
              input_data + 4 * workspace_height_stride, input_bank_c_reg, 1);

          int16x8_t acc_s16_0_1;
          int8x8_t acc_u8_0_1;
          // Iterate over input width shifts within 4x4 blocks.
          {
            // First output position: sub-block 0 (channels 0..3), rows 0/1
            // of the two output rows being produced.
            acc0 = adjusted_bias_data_s_0;
            acc1 = adjusted_bias_data_s_0;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_0);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_0);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_0);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_0);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            vst1_lane_8x4(output_data, acc_u8_0_1, 0);
            vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);

            // First output position: sub-block 1 (channels 4..7).
            acc0 = adjusted_bias_data_s_1;
            acc1 = adjusted_bias_data_s_1;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_1);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_1);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_1);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_1);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
            vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1,
                          1);

            // Stride 2: shift input banks by two bytes (16 bits) per output.
            input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
            input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
            input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);

            output_data += output_depth;
          }

          // output_width == four_over_stride.
          acc0 = adjusted_bias_data_s_0;
          acc1 = adjusted_bias_data_s_0;

          acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
          acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
          acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);

          // Fixed-point multiplication.
          acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_0);
          acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
              acc0, output_shift_s_0);
          acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_0);
          acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
              acc1, output_shift_s_0);
          // Add the output offset.
          acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
          acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
          // Apply the activation function.
          acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
          acc_u8_0_1 =
              util_vmax_x8(acc_u8_0_1, vget_low_s8(output_activation_min_vec));
          acc_u8_0_1 =
              util_vmin_x8(acc_u8_0_1, vget_low_s8(output_activation_max_vec));

          vst1_lane_8x4(output_data, acc_u8_0_1, 0);
          vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);

          acc0 = adjusted_bias_data_s_1;
          acc1 = adjusted_bias_data_s_1;

          acc0 = vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
          acc0 = vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
          acc0 = vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
          acc1 = vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);

          // Fixed-point multiplication.
          acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_1);
          acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
              acc0, output_shift_s_1);
          acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_1);
          acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
              acc1, output_shift_s_1);
          // Add the output offset.
          acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
          acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
          // Apply the activation function.
          acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
          acc_u8_0_1 =
              util_vmax_x8(acc_u8_0_1, vget_low_s8(output_activation_min_vec));
          acc_u8_0_1 =
              util_vmin_x8(acc_u8_0_1, vget_low_s8(output_activation_max_vec));

          vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
          vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1, 1);

          input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
          input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
          input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);

          output_data += output_depth;
        }

        // Residual micro blocks producing a single output position.
        for (; i_width < output_width_overall_micro_repeats; ++i_width) {
          // output_width == 1.
          const int8* input_data = scratch_data + 4 + 4 * i_width;

          // Load next sub-micro block of data.
          input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
          input_bank_a_reg = vld1q_lane_8x4(
              input_data + workspace_height_stride, input_bank_a_reg, 3);
          input_bank_b_reg = vld1q_lane_8x4(
              input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);
          input_bank_b_reg = vld1q_lane_8x4(
              input_data + 3 * workspace_height_stride, input_bank_b_reg, 3);
          input_bank_c_reg = vld1q_lane_8x4(
              input_data + 4 * workspace_height_stride, input_bank_c_reg, 1);

          int16x8_t acc_s16_0_1;
          int8x8_t acc_u8_0_1;
          // Iterate over input width shifts within 4x4 blocks.
          {
            acc0 = adjusted_bias_data_s_0;
            acc1 = adjusted_bias_data_s_0;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_a, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_a, input_bank_b_reg, 2);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_a, input_bank_c_reg, 0);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_0);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_0);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_0);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_0);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            vst1_lane_8x4(output_data, acc_u8_0_1, 0);
            vst1_lane_8x4(output_data + output_height_stride, acc_u8_0_1, 1);

            acc0 = adjusted_bias_data_s_1;
            acc1 = adjusted_bias_data_s_1;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_b, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_b, input_bank_a_reg, 2);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_b_reg, 2);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_c_reg, 0);

            // Fixed-point multiplication.
            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_1);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_1);
            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_1);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_1);
            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            vst1_lane_8x4(output_data + 4, acc_u8_0_1, 0);
            vst1_lane_8x4(output_data + 4 + output_height_stride, acc_u8_0_1,
                          1);

            input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
            input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);
            input_bank_c_reg = vshrq_n_u64(input_bank_c_reg, 16);

            output_data += output_depth;
          }
        }
      } else {
        TFLITE_DCHECK_EQ(block_height, 1);
        // Work through one slice, by row, at a time.
        const int8* scratch_data = scratch_block_data;
        typename QuantizationTypeImpl<quantization_type>::ExternalType*
            output_data = output_block_data + 8 * j_depth;

        int8x16_t input_bank_a_reg;  //  left 0, right 0, left 1, right 1.
        int8x16_t input_bank_b_reg;  //  left 2, right 2, xxx, xxx.

        // Load first sub-micro block of data into operational banks.
        input_bank_a_reg =
            vld1q_dup_s8x4(scratch_data);  // Load lane 0, avoiding
                                           // uninitialized variable.
        input_bank_a_reg = vld1q_lane_8x4(
            scratch_data + workspace_height_stride, input_bank_a_reg, 2);
        input_bank_b_reg = vld1q_dup_s8x4(
            scratch_data +
            2 * workspace_height_stride);  // Load lane 0, avoiding
                                           // uninitialized variable.

        int32x4_t acc0;
        int32x4_t acc1;

        for (int i_width = 0; i_width < output_width_overall_micro_repeats;
             ++i_width) {
          const int output_width =
              i_width == output_width_micro_repeats ? residual_width : 2;

          TFLITE_DCHECK_LE(output_width, 2);
          TFLITE_DCHECK_GE(output_width, 1);
          TFLITE_DCHECK_LE(output_width * kStrideVal, 4);
          const int8* input_data = scratch_data + 4 + 4 * i_width;

          // Load next sub-micro block of data.
          input_bank_a_reg = vld1q_lane_8x4(input_data, input_bank_a_reg, 1);
          input_bank_a_reg = vld1q_lane_8x4(
              input_data + workspace_height_stride, input_bank_a_reg, 3);
          input_bank_b_reg = vld1q_lane_8x4(
              input_data + 2 * workspace_height_stride, input_bank_b_reg, 1);

          int16x8_t acc_s16_0_1;
          int8x8_t acc_u8_0_1;
          // Iterate over input width shifts within 4x4 blocks.
          {
            acc0 = adjusted_bias_data_s_0;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);

            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_0);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_0);

            // Second sub-block accumulation.
            acc1 = adjusted_bias_data_s_1;

            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_a_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_a_reg, 2);

            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_1);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_1);

            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            // This stores the results for both sub-blocks together.
            vst1_s8(output_data, acc_u8_0_1);

            input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
            input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);

            output_data += output_depth;
          }
          if (output_width == 2) {
            acc0 = adjusted_bias_data_s_0;

            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_2_a, input_bank_b_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_0_a, input_bank_a_reg, 0);
            acc0 =
                vdotq_four_lane_s32(acc0, filter_reg_1_a, input_bank_a_reg, 2);

            acc0 = vqrdmulhq_s32(acc0, output_multiplier_s_0);
            acc0 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc0, output_shift_s_0);

            // Second sub-block accumulation.
            acc1 = adjusted_bias_data_s_1;

            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_2_b, input_bank_b_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_0_b, input_bank_a_reg, 0);
            acc1 =
                vdotq_four_lane_s32(acc1, filter_reg_1_b, input_bank_a_reg, 2);

            acc1 = vqrdmulhq_s32(acc1, output_multiplier_s_1);
            acc1 = DivideByPOT<DepthwiseConvOutputRounding::kUpward>::RunMult(
                acc1, output_shift_s_1);

            // Add the output offset.
            acc_s16_0_1 = vcombine_s16(vqmovn_s32(acc0), vqmovn_s32(acc1));
            acc_s16_0_1 = vqaddq_s16(acc_s16_0_1, output_offset_vec);
            // Apply the activation function.
            acc_u8_0_1 = vqmovxn_s16(acc_s16_0_1);
            acc_u8_0_1 = util_vmax_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_min_vec));
            acc_u8_0_1 = util_vmin_x8(acc_u8_0_1,
                                      vget_low_s8(output_activation_max_vec));

            // This stores the results for both sub-blocks together.
            vst1_s8(output_data, acc_u8_0_1);

            input_bank_a_reg = vshrq_n_u64(input_bank_a_reg, 16);
            input_bank_b_reg = vshrq_n_u64(input_bank_b_reg, 16);

            output_data += output_depth;
          }
        }
      }
    }
  }

  // Public entry point required by the KernelMacroBlock interface; forwards
  // directly to the intrinsics implementation above.
  static inline void Run(const int8* scratch_block_data,
                         const int8* filter_workspace, const int32* bias_data,
                         int8* output_block_data,
                         const DepthwiseConvDotProdParams* function_params) {
    KernelMacroBlockIntrinsics(scratch_block_data, filter_workspace, bias_data,
                               output_block_data, function_params);
  }
};
#undef vst1_lane_8x4
#undef vst1q_lane_8x4
#undef vld1q_lane_s8x8
#undef vld1_lane_8x4
#undef vld1q_lane_8x4
#undef vld1q_dup_s8x4
#endif // USE_NEON
} // namespace depthwise_conv
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_DEPTHWISECONV_UINT8_TRANSITIONAL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8_transitional.h | C++ | apache-2.0 | 371,628 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define EIGEN_USE_CUSTOM_THREAD_POOL
#define EIGEN_USE_THREADS
// NOTE: Eigen is slightly different internally and externally. We need to
// hack the unsupported/Eigen/CXX11/Tensor header instantiation macros at
// specific places, so we need two copies of the hacked file, one for
// internal and one for external.
// If you run into build problems, you can simply #undef the reducer macro
// (e.g. TFLITE_REDUCE_INSTANTIATIONS_GOOGLE), but be aware that this will
// make the binary much bigger!
#define TFLITE_REDUCE_INSTANTIATIONS_OPEN_SOURCE
#define Eigen EigenForTFLite
#if defined(TFLITE_REDUCE_INSTANTIATIONS_GOOGLE)
#include "tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_google.h"
#elif defined(TFLITE_REDUCE_INSTANTIATIONS_OPEN_SOURCE)
#include "tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_oss.h"
#else
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#endif
#include "tensorflow/core/kernels/eigen_spatial_convolutions-inl.h"
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h | C | apache-2.0 | 1,914 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is essentially unsupported/CXX11/Eigen/Tensor.h
// TODO(petewarden) - move this to a common location in Eigen itself.
// clang-format off
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_GOOGLE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_GOOGLE_H_
#include "Eigen/Core"
#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <CL/sycl.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#endif
#include <cmath>
#include <cstddef>
#include <cstring>
#ifdef _WIN32
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#include <windows.h>
#else
#include <stdint.h>
#include <unistd.h>
#endif
#if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900
#include <random>
#endif
#ifdef _WIN32
#include <windows.h>
#elif defined(__APPLE__)
#include <mach/mach_time.h>
#else
#include <time.h>
#endif
#ifdef EIGEN_USE_THREADS
#include "third_party/eigen3/unsupported/Eigen/CXX11/ThreadPool"
#endif
#include "Eigen/src/Core/util/DisableStupidWarnings.h"
#include "third_party/eigen3/unsupported/Eigen/SpecialFunctions"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/util/CXX11Meta.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/util/MaxSizeVector.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBase.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h"
#undef TENSOR_CONTRACTION_DISPATCH
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
if (this->m_lhs_inner_dim_contiguous && \
this->m_rhs_inner_dim_contiguous && \
!this->m_rhs_inner_dim_reordered) { \
METHOD<true, true, false, ALIGNMENT> ARGS; \
} else { \
eigen_assert(false && "Unsupported contraction formats"); \
}
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorScan.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/Tensor.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorMap.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorRef.h"
#include "third_party/eigen3/unsupported/Eigen/CXX11/src/Tensor/TensorIO.h"
#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_GOOGLE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_google.h | C++ | apache-2.0 | 7,764 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
// This is essentially unsupported/CXX11/Eigen/Tensor.h
// TODO(petewarden) - move this to a common location in Eigen itself.
// clang-format off
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_OSS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_OSS_H_
#include "Eigen/Core"
#if defined(EIGEN_USE_SYCL)
#undef min
#undef max
#undef isnan
#undef isinf
#undef isfinite
#include <CL/sycl.hpp>
#include <iostream>
#include <map>
#include <memory>
#include <utility>
#endif
#include <cmath>
#include <cstddef>
#include <cstring>
#ifdef _WIN32
typedef __int16 int16_t;
typedef unsigned __int16 uint16_t;
typedef __int32 int32_t;
typedef unsigned __int32 uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#include <windows.h>
#else
#include <stdint.h>
#include <unistd.h>
#endif
#if __cplusplus > 199711 || EIGEN_COMP_MSVC >= 1900
#include <random>
#endif
#ifdef _WIN32
#include <windows.h>
#elif defined(__APPLE__)
#include <mach/mach_time.h>
#else
#include <time.h>
#endif
#ifdef EIGEN_USE_THREADS
#include "unsupported/Eigen/CXX11/ThreadPool"
#endif
#include "Eigen/src/Core/util/DisableStupidWarnings.h"
#include "unsupported/Eigen/SpecialFunctions"
#include "unsupported/Eigen/CXX11/src/util/CXX11Meta.h"
#include "unsupported/Eigen/CXX11/src/util/MaxSizeVector.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorMacros.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorForwardDeclarations.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorMeta.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorFunctors.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorCostModel.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDeviceDefault.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDeviceThreadPool.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDeviceGpu.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorIndexList.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDimensionList.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDimensions.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorInitializer.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorTraits.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorRandom.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorUInt128.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorIntDiv.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorGlobalFunctions.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorBase.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorBlock.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvaluator.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorExpr.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorReduction.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorReductionGpu.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorArgMax.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorConcatenation.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorContractionMapper.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorContractionBlocking.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorContraction.h"
#undef TENSOR_CONTRACTION_DISPATCH
#define TENSOR_CONTRACTION_DISPATCH(METHOD, ALIGNMENT, ARGS) \
if (this->m_lhs_inner_dim_contiguous && \
this->m_rhs_inner_dim_contiguous && \
!this->m_rhs_inner_dim_reordered) { \
METHOD<true, true, false, ALIGNMENT> ARGS; \
} else { \
eigen_assert(false && "Unsupported contraction formats"); \
}
#include "unsupported/Eigen/CXX11/src/Tensor/TensorContractionThreadPool.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorContractionGpu.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorConversion.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorConvolution.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorFFT.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorPatch.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorImagePatch.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorVolumePatch.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorBroadcasting.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorChipping.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorInflation.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorLayoutSwap.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorMorphing.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorPadding.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorReverse.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorShuffling.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorStriding.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorCustomOp.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorEvalTo.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorForcedEval.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorGenerator.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorAssign.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorScan.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorTrace.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorExecutor.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorDevice.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorStorage.h"
#include "unsupported/Eigen/CXX11/src/Tensor/Tensor.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorFixedSize.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorMap.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorRef.h"
#include "unsupported/Eigen/CXX11/src/Tensor/TensorIO.h"
#include "Eigen/src/Core/util/ReenableStupidWarnings.h"
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_TENSOR_REDUCED_INSTANTIATIONS_OSS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_oss.h | C++ | apache-2.0 | 6,539 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_IM2COL_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_IM2COL_UTILS_H_
#include <cassert>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
// Copies the kheight x kwidth x in_depth input patch that produces output
// pixel (b, h, w) into column `buffer_id` of the im2col buffer, i.e. at
// conv_buffer_data + buffer_id * single_buffer_length.  Patch positions that
// fall outside the input image are filled with `zero_byte`.  Padding is
// written with memset, so for element types wider than one byte every byte of
// a padded element is set to `zero_byte` (callers pass 0 for float types).
template <typename T>
inline void ExtractPatchIntoBufferColumn(const RuntimeShape& input_shape, int w,
                                         int h, int b, int kheight, int kwidth,
                                         int stride_width, int stride_height,
                                         int pad_width, int pad_height,
                                         int in_width, int in_height,
                                         int in_depth, int single_buffer_length,
                                         int buffer_id, const T* in_data,
                                         T* conv_buffer_data, uint8 zero_byte) {
  ruy::profiler::ScopeLabel label("ExtractPatchIntoBufferColumn");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  // This chunk of code reshapes all the inputs corresponding to
  // output (b, h, w) to a column vector in conv_buffer(:, buffer_id).
  // Element strides for advancing one kernel row / one input row.
  const int kwidth_times_indepth = kwidth * in_depth;
  const int inwidth_times_indepth = in_width * in_depth;
  // "Ungated" bounds are the kernel window's nominal extent; the gated
  // (start/end) values below are clamped to the input image.
  const int ih_ungated_start = h * stride_height - pad_height;
  const int ih_ungated_end = (ih_ungated_start + kheight);
  const int ih_end = std::min(ih_ungated_end, in_height);
  const int iw_ungated_start = w * stride_width - pad_width;
  const int iw_ungated_end = (iw_ungated_start + kwidth);
  const int iw_end = std::min(iw_ungated_end, in_width);
  // If the patch is off the edge of the input image, skip writing those rows
  // and columns from the patch into the output array.
  const int h_offset = std::max(0, -ih_ungated_start);
  const int w_offset = std::max(0, -iw_ungated_start);
  const int ih_start = std::max(0, ih_ungated_start);
  const int iw_start = std::max(0, iw_ungated_start);
  // Number of in-image elements in one copied kernel row.
  const int single_row_num =
      std::min(kwidth - w_offset, in_width - iw_start) * in_depth;
  const int output_row_offset = (buffer_id * single_buffer_length);
  int out_offset =
      output_row_offset + (h_offset * kwidth + w_offset) * in_depth;
  int in_offset = Offset(input_shape, b, ih_start, iw_start, 0);
  // Express all of the calculations as padding around the input patch.
  const int top_padding = h_offset;
  const int bottom_padding = (ih_ungated_end - ih_end);
  const int left_padding = w_offset;
  const int right_padding = (iw_ungated_end - iw_end);
  assert(single_row_num ==
         ((kwidth - (left_padding + right_padding)) * in_depth));
  // Write out zeroes to the elements representing the top rows of the input
  // patch that are off the edge of the input image.
  if (top_padding > 0) {
    const int top_row_elements = (top_padding * kwidth * in_depth);
    memset(conv_buffer_data + output_row_offset, zero_byte,
           (top_row_elements * sizeof(T)));
  }
  // If the patch is on the interior of the input image horizontally, just copy
  // over the rows sequentially, otherwise add zero padding at the start or end.
  if ((left_padding == 0) && (right_padding == 0)) {
    for (int ih = ih_start; ih < ih_end; ++ih) {
      memcpy(conv_buffer_data + out_offset, in_data + in_offset,
             single_row_num * sizeof(T));
      out_offset += kwidth_times_indepth;
      in_offset += inwidth_times_indepth;
    }
  } else {
    for (int ih = ih_start; ih < ih_end; ++ih) {
      if (left_padding > 0) {
        const int left_start = (out_offset - (left_padding * in_depth));
        memset(conv_buffer_data + left_start, zero_byte,
               (left_padding * in_depth * sizeof(T)));
      }
      memcpy(conv_buffer_data + out_offset, in_data + in_offset,
             single_row_num * sizeof(T));
      if (right_padding > 0) {
        const int right_start = (out_offset + single_row_num);
        memset(conv_buffer_data + right_start, zero_byte,
               (right_padding * in_depth * sizeof(T)));
      }
      out_offset += kwidth_times_indepth;
      in_offset += inwidth_times_indepth;
    }
  }
  // If the bottom of the patch falls off the input image, pad the values
  // representing those input rows with zeroes.
  if (bottom_padding > 0) {
    const int bottom_row_elements = (bottom_padding * kwidth * in_depth);
    const int bottom_start =
        output_row_offset +
        ((top_padding + (ih_end - ih_start)) * kwidth * in_depth);
    memset(conv_buffer_data + bottom_start, zero_byte,
           (bottom_row_elements * sizeof(T)));
  }
}
// Supports per-batch zero_byte for per-batch asymmetric quantized inputs.
template <typename T>
void DilatedIm2col(const ConvParams& params, const RuntimeShape& input_shape,
const T* input_data, const RuntimeShape& filter_shape,
const RuntimeShape& output_shape, T* im2col_data,
const int32_t* zero_bytes, const int zero_bytes_len) {
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
// For dilated convolution, the input pixels are not contiguous therefore we
// can't use the same optimizations as Im2Col(). Though note this code would
// work fine for the non-dilated case too (though likely a bit slower).
ruy::profiler::ScopeLabel label("DilatedIm2col");
TFLITE_DCHECK(dilation_width_factor != 1 || dilation_height_factor != 1);
TFLITE_DCHECK(im2col_data);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
const int filter_height = filter_shape.Dims(1);
const int filter_width = filter_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
MatchingDim(output_shape, 3, filter_shape, 0);
// Construct the MxN sized im2col matrix.
// The rows M, are sub-ordered B x H x W
const RuntimeShape row_shape({1, batches, output_height, output_width});
// The columns, N, are sub-ordered Kh x Kw x Din
const RuntimeShape col_shape({1, filter_height, filter_width, input_depth});
// Use dimensions M and N to construct dims for indexing directly into im2col
const RuntimeShape im2col_shape(
{1, 1, row_shape.FlatSize(), col_shape.FlatSize()});
// Loop through the output rows (B x H x W)
for (int batch = 0; batch < batches; ++batch) {
const T zero_byte = zero_bytes_len > 1 ? static_cast<T>(zero_bytes[batch])
: static_cast<T>(zero_bytes[0]);
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
// Each im2col row is an output pixel. Arrange the input data in this
// row in an order we can conveniently multiply with the filter data.
int row_offset = Offset(row_shape, 0, batch, out_y, out_x);
const int in_x_origin = (out_x * stride_width) - pad_width;
const int in_y_origin = (out_y * stride_height) - pad_height;
// Loop through all the pixels of the filter (Kh x Kw)
for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
const int in_y = in_y_origin + dilation_height_factor * filter_y;
if ((in_y >= 0) && (in_y < input_height)) {
// Filter row is within the input data.
// Loop through all the filter pixels in this row.
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int in_x = in_x_origin + dilation_width_factor * filter_x;
int col_offset = Offset(col_shape, 0, filter_y, filter_x, 0);
T* dst = im2col_data +
Offset(im2col_shape, 0, 0, row_offset, col_offset);
if ((in_x >= 0) && (in_x < input_width)) {
// Filter pixel is within the input, copy the input data.
T const* src =
input_data + Offset(input_shape, batch, in_y, in_x, 0);
memcpy(dst, src, input_depth * sizeof(T));
} else {
// Filter pixel is outside the input, zero it out.
memset(dst, zero_byte, input_depth * sizeof(T));
}
}
} else {
// Filter row is outside the input, zero out the entire filter row.
int col_offset = Offset(col_shape, 0, filter_y, 0, 0);
T* dst = im2col_data +
Offset(im2col_shape, 0, 0, row_offset, col_offset);
memset(dst, zero_byte, filter_width * input_depth * sizeof(T));
}
}
}
}
}
}
// Convenience overload: a single uint8 zero point applied to every batch.
template <typename T>
void DilatedIm2col(const ConvParams& params, uint8 zero_byte,
                   const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& filter_shape,
                   const RuntimeShape& output_shape, T* im2col_data) {
  const int32_t single_zero_point[1] = {static_cast<int32_t>(zero_byte)};
  DilatedIm2col<T>(params, input_shape, input_data, filter_shape, output_shape,
                   im2col_data, single_zero_point, /*zero_bytes_len=*/1);
}
// Classic im2col for stride-based (non-dilated) convolution: each output
// pixel (b, y, x) gets one patch column in `output_data`, padded with
// `zero_byte` wherever the kernel window hangs off the input image.
template <typename T>
void Im2col(const ConvParams& params, int kheight, int kwidth, uint8 zero_byte,
            const RuntimeShape& input_shape, const T* input_data,
            const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Im2col");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int in_depth = input_shape.Dims(3);
  const int in_width = input_shape.Dims(2);
  const int in_height = input_shape.Dims(1);
  const int out_depth = output_shape.Dims(3);
  const int out_width = output_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  // One patch column per output pixel, in B x H x W order.
  int patch_index = 0;
  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < out_height; ++y) {
      for (int x = 0; x < out_width; ++x) {
        ExtractPatchIntoBufferColumn(
            input_shape, x, y, b, kheight, kwidth, params.stride_width,
            params.stride_height, params.padding_values.width,
            params.padding_values.height, in_width, in_height, in_depth,
            out_depth, patch_index, input_data, output_data, zero_byte);
        ++patch_index;
      }
    }
  }
}
// Im2col variant for per-batch asymmetric quantization: `input_offsets`
// supplies one zero point per batch (input_offsets_size must equal the batch
// count); patches of batch b are padded with input_offsets[b].
template <typename T>
void Im2col(const ConvParams& params, int kheight, int kwidth,
            const int32_t* input_offsets, const int input_offsets_size,
            const RuntimeShape& input_shape, const T* input_data,
            const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Im2col");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  TFLITE_DCHECK_EQ(batches, input_offsets_size);
  const int in_depth = input_shape.Dims(3);
  const int in_width = input_shape.Dims(2);
  const int in_height = input_shape.Dims(1);
  const int out_depth = output_shape.Dims(3);
  const int out_width = output_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  // One patch column per output pixel, in B x H x W order.
  int patch_index = 0;
  for (int b = 0; b < batches; ++b) {
    // Zero point for this batch's padding.
    const uint8_t batch_zero_byte = static_cast<uint8_t>(input_offsets[b]);
    for (int y = 0; y < out_height; ++y) {
      for (int x = 0; x < out_width; ++x) {
        ExtractPatchIntoBufferColumn(
            input_shape, x, y, b, kheight, kwidth, params.stride_width,
            params.stride_height, params.padding_values.width,
            params.padding_values.height, in_width, in_height, in_depth,
            out_depth, patch_index, input_data, output_data, batch_zero_byte);
        ++patch_index;
      }
    }
  }
}
// 3D analogue of ExtractPatchIntoBufferColumn: copies the
// kdepth x kheight x kwidth x in_channel input patch that produces output
// voxel (b, d, h, w) into the im2col buffer starting at
// conv_buffer_data + output_row_offset.  Out-of-image positions are filled
// with `zero_byte` via memset, so for element types wider than one byte
// every byte of a padded element is set to `zero_byte`.
template <typename T>
inline void ExtractPatchIntoBufferColumn3D(
    int b, int d, int h, int w,  // Output indexes.
    int kdepth, int kheight, int kwidth,  // Kernel params.
    int stride_depth, int stride_height, int stride_width,  // Stride params.
    int pad_depth, int pad_height, int pad_width,  // Padding params.
    int in_depth, int in_height, int in_width, int in_channel,  // Input shape.
    int output_row_offset, const T* in_data, T* conv_buffer_data,
    uint8 zero_byte) {
  ruy::profiler::ScopeLabel label("ExtractPatchIntoBufferColumn3D");
  // This chunk of code reshapes all the inputs corresponding to
  // output (b, d, h, w) to a column vector in conv_buffer(:, buffer_id).
  // "Ungated" bounds are the kernel window's nominal extent; start/end are
  // clamped to the input volume.
  const int id_ungated_start = d * stride_depth - pad_depth;
  const int id_start = std::max(0, id_ungated_start);
  const int id_ungated_end = (id_ungated_start + kdepth);
  const int id_end = std::min(id_ungated_end, in_depth);
  const int ih_ungated_start = h * stride_height - pad_height;
  const int ih_start = std::max(0, ih_ungated_start);
  const int ih_ungated_end = (ih_ungated_start + kheight);
  const int ih_end = std::min(ih_ungated_end, in_height);
  const int iw_ungated_start = w * stride_width - pad_width;
  const int iw_start = std::max(0, iw_ungated_start);
  const int iw_ungated_end = (iw_ungated_start + kwidth);
  const int iw_end = std::min(iw_ungated_end, in_width);
  // Calculate the padding sizes.
  const int d_padding_before = std::max(0, -id_ungated_start);
  const int d_padding_after = (id_ungated_end - id_end);
  const int h_padding_before = std::max(0, -ih_ungated_start);
  const int h_padding_after = (ih_ungated_end - ih_end);
  const int w_padding_before = std::max(0, -iw_ungated_start);
  const int w_padding_after = (iw_ungated_end - iw_end);
  // Memset if there are paddings in the depth dimension.
  const int kd_stride_size = kheight * kwidth * in_channel;
  const int id_stride_size = in_height * in_width * in_channel;
  if (d_padding_before > 0) {
    const int d_padding_before_elements = (d_padding_before * kd_stride_size);
    memset(conv_buffer_data + output_row_offset, zero_byte,
           (d_padding_before_elements * sizeof(T)));
  }
  if (d_padding_after > 0) {
    const int d_padding_after_elements = (d_padding_after * kd_stride_size);
    const int bottom_start =
        output_row_offset + (kdepth - d_padding_after) * kd_stride_size;
    memset(conv_buffer_data + bottom_start, zero_byte,
           (d_padding_after_elements * sizeof(T)));
  }
  // If there are paddings in height or width dimension, memset the entire
  // middle area first, then overwrite the valid parts below; this takes
  // advantage of sequential memory handling performance.
  int out_offset = output_row_offset + d_padding_before * kd_stride_size;
  if (h_padding_before > 0 || h_padding_after > 0 || w_padding_before > 0 ||
      w_padding_after > 0) {
    const int middle_elements = (id_end - id_start) * kd_stride_size;
    memset(conv_buffer_data + out_offset, zero_byte,
           (middle_elements * sizeof(T)));
  }
  // Copy the valid data from the input tensor.
  const int kh_stride_size = kwidth * in_channel;
  const int ih_stride_size = in_width * in_channel;
  const int h_padding = h_padding_before + h_padding_after;
  const int w_padding = w_padding_before + w_padding_after;
  const int single_row_num = (kwidth - w_padding) * in_channel;
  out_offset +=
      h_padding_before * kh_stride_size + w_padding_before * in_channel;
  const int in_offset_without_d = b * in_depth * id_stride_size +
                                  ih_start * ih_stride_size +
                                  iw_start * in_channel;
  for (int id = id_start; id < id_end; ++id) {
    int in_offset = in_offset_without_d + id * id_stride_size;
    for (int ih = ih_start; ih < ih_end; ++ih) {
      // Copy one contiguous in-image kernel row at a time.
      memcpy(conv_buffer_data + out_offset, in_data + in_offset,
             single_row_num * sizeof(T));
      out_offset += kh_stride_size;
      in_offset += ih_stride_size;
    }
    // Skip over the rows already zero-filled by the memset above.
    out_offset += h_padding * kh_stride_size;
  }
}
// Fills `im2col_data` so that each output voxel (b, d, h, w) owns one row
// containing its Kd x Kh x Kw x Cin input patch (NDHWC layout), with
// out-of-image positions padded by `zero_byte`.
template <typename T>
void Im2col3D(const Conv3DParams& params, int kdepth, int kheight, int kwidth,
              uint8 zero_byte, const RuntimeShape& input_shape,
              const T* input_data, const RuntimeShape& im2col_shape,
              T* im2col_data) {
  ruy::profiler::ScopeLabel label("Im2col3D");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(im2col_shape.DimensionsCount(), 5);
  const int batches = MatchingDim(input_shape, 0, im2col_shape, 0);
  const int in_d = input_shape.Dims(1);
  const int in_h = input_shape.Dims(2);
  const int in_w = input_shape.Dims(3);
  const int in_c = input_shape.Dims(4);
  const int out_d = im2col_shape.Dims(1);
  const int out_h = im2col_shape.Dims(2);
  const int out_w = im2col_shape.Dims(3);
  const int out_c = im2col_shape.Dims(4);
  // Each output voxel's patch starts out_c elements after the previous one.
  int row_offset = 0;
  for (int b = 0; b < batches; ++b) {
    for (int d = 0; d < out_d; ++d) {
      for (int h = 0; h < out_h; ++h) {
        for (int w = 0; w < out_w; ++w) {
          ExtractPatchIntoBufferColumn3D(
              b, d, h, w, kdepth, kheight, kwidth, params.stride_depth,
              params.stride_height, params.stride_width,
              params.padding_values.depth, params.padding_values.height,
              params.padding_values.width, in_d, in_h, in_w, in_c, row_offset,
              input_data, im2col_data, zero_byte);
          row_offset += out_c;
        }
      }
    }
  }
}
// Dilated 3D im2col: for every output voxel, materializes the
// (Kd x Kh x Kw x Cin) input patch as one row of an im2col matrix, honoring
// the dilation factors in `params`. Out-of-range (padding) taps are filled
// with `zero_byte` (the byte pattern of the quantized zero point).
template <typename T>
inline void DilatedIm2col3D(const Conv3DParams& params, int filter_depth,
                            int filter_height, int filter_width,
                            uint8 zero_byte, const RuntimeShape& input_shape,
                            const T* input_data,
                            const RuntimeShape& im2col_shape, T* im2col_data) {
  ruy::profiler::ScopeLabel label("DilatedIm2col3D");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(im2col_shape.DimensionsCount(), 5);
  // Only NDHWC format is currently supported.
  const int batches = MatchingDim(input_shape, 0, im2col_shape, 0);
  const int input_channels = input_shape.Dims(4);
  const int input_width = input_shape.Dims(3);
  const int input_height = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(1);
  const int output_width = im2col_shape.Dims(3);
  const int output_height = im2col_shape.Dims(2);
  const int output_depth = im2col_shape.Dims(1);
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int pad_depth = params.padding_values.depth;
  // Construct the MxN sized im2col matrix.
  // The rows M, are sub-ordered B x D x H x W.
  const RuntimeShape row_shape(
      {1, batches, output_depth, output_height, output_width});
  // The columns, N, are sub-ordered Kd x Kh x Kw x Din.
  const RuntimeShape col_shape(
      {1, filter_depth, filter_height, filter_width, input_channels});
  // Use dimensions M and N to construct dims for indexing directly into im2col.
  const RuntimeShape im2col_reshaped(
      {1, 1, row_shape.FlatSize(), col_shape.FlatSize()});
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_d = 0; out_d < output_depth; ++out_d) {
      const int in_d_origin = (out_d * params.stride_depth) - pad_depth;
      for (int out_y = 0; out_y < output_height; ++out_y) {
        const int in_y_origin = (out_y * params.stride_height) - pad_height;
        for (int out_x = 0; out_x < output_width; ++out_x) {
          const int in_x_origin = (out_x * params.stride_width) - pad_width;
          const int row_offset =
              Offset(row_shape, 0, batch, out_d, out_y, out_x);
          for (int filter_d = 0; filter_d < filter_depth; ++filter_d) {
            const int in_d = in_d_origin + params.dilation_depth * filter_d;
            if ((in_d >= 0) && (in_d < input_depth)) {
              for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
                const int in_y =
                    in_y_origin + params.dilation_height * filter_y;
                if ((in_y >= 0) && (in_y < input_height)) {
                  for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                    const int in_x =
                        in_x_origin + params.dilation_width * filter_x;
                    int col_offset =
                        Offset(col_shape, 0, filter_d, filter_y, filter_x, 0);
                    T* dst = im2col_data + Offset(im2col_reshaped, 0, 0,
                                                  row_offset, col_offset);
                    if ((in_x >= 0) && (in_x < input_width)) {
                      // Filter pixel is within the input: copy one channel
                      // vector. BUGFIX: the contiguous inner run of both the
                      // NDHWC input and the im2col column is `input_channels`
                      // elements; the previous code sized these copies with
                      // `input_depth` (the spatial D extent, Dims(1)), which
                      // over/under-copies whenever D != C.
                      T const* src = input_data + Offset(input_shape, batch,
                                                         in_d, in_y, in_x, 0);
                      memcpy(dst, src, input_channels * sizeof(T));
                    } else {
                      // Filter pixel is outside the input: zero one channel
                      // vector.
                      memset(dst, zero_byte, input_channels * sizeof(T));
                    }
                  }
                } else {
                  // Entire filter row out of bounds vertically: zero
                  // filter_width consecutive channel vectors at once.
                  const int col_offset =
                      Offset(col_shape, 0, filter_d, filter_y, 0, 0);
                  T* dst = im2col_data + Offset(im2col_reshaped, 0, 0,
                                                row_offset, col_offset);
                  memset(dst, zero_byte,
                         filter_width * input_channels * sizeof(T));
                }
              }
            } else {
              // Entire filter plane out of bounds in depth: zero
              // filter_height * filter_width channel vectors at once.
              const int col_offset = Offset(col_shape, 0, filter_d, 0, 0, 0);
              T* dst = im2col_data +
                       Offset(im2col_reshaped, 0, 0, row_offset, col_offset);
              memset(dst, zero_byte,
                     filter_height * filter_width * input_channels * sizeof(T));
            }
          }
        }
      }
    }
  }
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_IM2COL_UTILS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/im2col_utils.h | C++ | apache-2.0 | 23,934 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_
#include <algorithm>
#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/add.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
// Requires |params| offsets strictly inside (-256, 256) (checked below) so the
// widened 16-bit arithmetic cannot overflow.
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const int8* input1_data, const int8* input2_data,
                           int8* output_data) {
  ruy::profiler::ScopeLabel label("AddElementwiseInt8/8bit");
  int i = 0;
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
#ifdef USE_NEON
  const int8x16_t output_activation_min_vector =
      vdupq_n_s8(params.quantized_activation_min);
  const int8x16_t output_activation_max_vector =
      vdupq_n_s8(params.quantized_activation_max);
  // Fold the shared left_shift into each input's own shift so a single
  // vshlq per register performs both scalings.
  const int input1_left_shift = params.left_shift + params.input1_shift;
  const int input2_left_shift = params.left_shift + params.input2_shift;
  const int32x4_t input1_left_dup = vdupq_n_s32(input1_left_shift);
  const int32x4_t input2_left_dup = vdupq_n_s32(input2_left_shift);
  const int16x8_t input1_offset_dup = vdupq_n_s16(params.input1_offset);
  const int16x8_t input2_offset_dup = vdupq_n_s16(params.input2_offset);
  // Vector main loop: 16 int8 lanes per iteration.
  for (; i <= size - 16; i += 16) {
    const int8x16_t input1_val_original = vld1q_s8(input1_data + i);
    const int8x16_t input2_val_original = vld1q_s8(input2_data + i);
    // Widen int8 -> int16 (low/high halves) and apply the input offsets.
    const int16x8_t input1_val_s16_high =
        vmovl_s8(vget_high_s8(input1_val_original));
    const int16x8_t input1_val_s16_low =
        vmovl_s8(vget_low_s8(input1_val_original));
    const int16x8_t input2_val_s16_high =
        vmovl_s8(vget_high_s8(input2_val_original));
    const int16x8_t input2_val_s16_low =
        vmovl_s8(vget_low_s8(input2_val_original));
    const int16x8_t input1_val_high =
        vaddq_s16(input1_val_s16_high, input1_offset_dup);
    const int16x8_t input2_val_high =
        vaddq_s16(input2_val_s16_high, input2_offset_dup);
    const int16x8_t input1_val_low =
        vaddq_s16(input1_val_s16_low, input1_offset_dup);
    const int16x8_t input2_val_low =
        vaddq_s16(input2_val_s16_low, input2_offset_dup);
    const int16x4_t input1_val_high_high = vget_high_s16(input1_val_high);
    const int16x4_t input1_val_high_low = vget_low_s16(input1_val_high);
    const int16x4_t input1_val_low_high = vget_high_s16(input1_val_low);
    const int16x4_t input1_val_low_low = vget_low_s16(input1_val_low);
    const int16x4_t input2_val_high_high = vget_high_s16(input2_val_high);
    const int16x4_t input2_val_high_low = vget_low_s16(input2_val_high);
    const int16x4_t input2_val_low_high = vget_high_s16(input2_val_low);
    const int16x4_t input2_val_low_low = vget_low_s16(input2_val_low);
    // Widen int16 -> int32: four 4-lane registers per input.
    int32x4_t x111 = vmovl_s16(input1_val_low_low);
    int32x4_t x112 = vmovl_s16(input1_val_low_high);
    int32x4_t x121 = vmovl_s16(input1_val_high_low);
    int32x4_t x122 = vmovl_s16(input1_val_high_high);
    int32x4_t x211 = vmovl_s16(input2_val_low_low);
    int32x4_t x212 = vmovl_s16(input2_val_low_high);
    int32x4_t x221 = vmovl_s16(input2_val_high_low);
    int32x4_t x222 = vmovl_s16(input2_val_high_high);
    // Pre-shift, then rescale each input by its fixed-point multiplier.
    x111 = vshlq_s32(x111, input1_left_dup);
    x112 = vshlq_s32(x112, input1_left_dup);
    x121 = vshlq_s32(x121, input1_left_dup);
    x122 = vshlq_s32(x122, input1_left_dup);
    x211 = vshlq_s32(x211, input2_left_dup);
    x212 = vshlq_s32(x212, input2_left_dup);
    x221 = vshlq_s32(x221, input2_left_dup);
    x222 = vshlq_s32(x222, input2_left_dup);
    x111 = vqrdmulhq_n_s32(x111, params.input1_multiplier);
    x112 = vqrdmulhq_n_s32(x112, params.input1_multiplier);
    x121 = vqrdmulhq_n_s32(x121, params.input1_multiplier);
    x122 = vqrdmulhq_n_s32(x122, params.input1_multiplier);
    x211 = vqrdmulhq_n_s32(x211, params.input2_multiplier);
    x212 = vqrdmulhq_n_s32(x212, params.input2_multiplier);
    x221 = vqrdmulhq_n_s32(x221, params.input2_multiplier);
    x222 = vqrdmulhq_n_s32(x222, params.input2_multiplier);
    // Sum the rescaled inputs.
    int32x4_t s11 = vaddq_s32(x111, x211);
    int32x4_t s12 = vaddq_s32(x112, x212);
    int32x4_t s21 = vaddq_s32(x121, x221);
    int32x4_t s22 = vaddq_s32(x122, x222);
    // Requantize the sum to the output scale.
    s11 = vqrdmulhq_n_s32(s11, params.output_multiplier);
    s12 = vqrdmulhq_n_s32(s12, params.output_multiplier);
    s21 = vqrdmulhq_n_s32(s21, params.output_multiplier);
    s22 = vqrdmulhq_n_s32(s22, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    s11 = RoundingDivideByPOT(s11, -params.output_shift);
    s12 = RoundingDivideByPOT(s12, -params.output_shift);
    s21 = RoundingDivideByPOT(s21, -params.output_shift);
    s22 = RoundingDivideByPOT(s22, -params.output_shift);
    // Narrow back down, add the output offset, and saturate/clamp to the
    // activation range.
    const int16x4_t s11_narrowed = vmovn_s32(s11);
    const int16x4_t s12_narrowed = vmovn_s32(s12);
    const int16x4_t s21_narrowed = vmovn_s32(s21);
    const int16x4_t s22_narrowed = vmovn_s32(s22);
    const int16x8_t s1 = vaddq_s16(vcombine_s16(s11_narrowed, s12_narrowed),
                                   vdupq_n_s16(params.output_offset));
    const int16x8_t s2 = vaddq_s16(vcombine_s16(s21_narrowed, s22_narrowed),
                                   vdupq_n_s16(params.output_offset));
    const int8x16_t s = vcombine_s8(vqmovn_s16(s1), vqmovn_s16(s2));
    const int8x16_t clamped =
        vmaxq_s8(output_activation_min_vector,
                 vminq_s8(output_activation_max_vector, s));
    vst1q_s8(output_data + i, clamped);
  }
#endif // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < size; ++i) {
    const int32 input1_val = params.input1_offset + input1_data[i];
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
    const int32 scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    const int32 scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, params.input2_multiplier, params.input2_shift);
    const int32 raw_sum = scaled_input1_val + scaled_input2_val;
    const int32 raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            raw_sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<int8>(clamped_output);
  }
}
// Scalar-broadcast add that can be used for inner loop of more general
// broadcast add, so that, for example, scalar-broadcast with batch will still
// be fast.
// `input1_data` is a single scalar broadcast against the `input2_data` array.
// Its rescaled value is computed once up front and reused for every element.
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
                               int8 input1_data, const int8* input2_data,
                               int8* output_data) {
  using gemmlowp::RoundingDivideByPOT;
  ruy::profiler::ScopeLabel label("AddScalarBroadcastInt8/8bit");
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  int i = 0;
#ifdef USE_NEON
  const int32x4_t left_shift_dup = vdupq_n_s32(params.left_shift);
  const int8x8_t output_activation_min_vector =
      vdup_n_s8(params.quantized_activation_min);
  const int8x8_t output_activation_max_vector =
      vdup_n_s8(params.quantized_activation_max);
  // Process broadcast scalar.
  // (Offset, shift, and rescale the scalar once, outside the loop.)
  const int8x8_t input1_val_original = vdup_n_s8(input1_data);
  const int16x8_t input1_val_s16 = vmovl_s8(input1_val_original);
  const int16x8_t input1_val =
      vaddq_s16(input1_val_s16, vdupq_n_s16(params.input1_offset));
  const int16x4_t input1_val_high = vget_high_s16(input1_val);
  const int16x4_t input1_val_low = vget_low_s16(input1_val);
  int32x4_t x11 = vmovl_s16(input1_val_low);
  int32x4_t x12 = vmovl_s16(input1_val_high);
  x11 = vshlq_s32(x11, left_shift_dup);
  x12 = vshlq_s32(x12, left_shift_dup);
  x11 = vqrdmulhq_n_s32(x11, params.input1_multiplier);
  x12 = vqrdmulhq_n_s32(x12, params.input1_multiplier);
  const int32x4_t input1_shift_dup = vdupq_n_s32(params.input1_shift);
  x11 = vshlq_s32(x11, input1_shift_dup);
  x12 = vshlq_s32(x12, input1_shift_dup);
  // Vector main loop: 8 int8 lanes per iteration.
  for (; i <= size - 8; i += 8) {
    const int8x8_t input2_val_original = vld1_s8(input2_data + i);
    const int16x8_t input2_val_s16 = vmovl_s8(input2_val_original);
    const int16x8_t input2_val =
        vaddq_s16(input2_val_s16, vdupq_n_s16(params.input2_offset));
    const int16x4_t input2_val_high = vget_high_s16(input2_val);
    const int16x4_t input2_val_low = vget_low_s16(input2_val);
    int32x4_t x21 = vmovl_s16(input2_val_low);
    int32x4_t x22 = vmovl_s16(input2_val_high);
    x21 = vshlq_s32(x21, left_shift_dup);
    x22 = vshlq_s32(x22, left_shift_dup);
    x21 = vqrdmulhq_n_s32(x21, params.input2_multiplier);
    x22 = vqrdmulhq_n_s32(x22, params.input2_multiplier);
    const int32x4_t input2_shift_dup = vdupq_n_s32(params.input2_shift);
    x21 = vshlq_s32(x21, input2_shift_dup);
    x22 = vshlq_s32(x22, input2_shift_dup);
    // Sum, requantize to the output scale, then offset and clamp.
    int32x4_t s1 = vaddq_s32(x11, x21);
    int32x4_t s2 = vaddq_s32(x12, x22);
    s1 = vqrdmulhq_n_s32(s1, params.output_multiplier);
    s2 = vqrdmulhq_n_s32(s2, params.output_multiplier);
    s1 = RoundingDivideByPOT(s1, -params.output_shift);
    s2 = RoundingDivideByPOT(s2, -params.output_shift);
    const int16x4_t s1_narrowed = vmovn_s32(s1);
    const int16x4_t s2_narrowed = vmovn_s32(s2);
    const int16x8_t s = vaddq_s16(vcombine_s16(s1_narrowed, s2_narrowed),
                                  vdupq_n_s16(params.output_offset));
    const int8x8_t clamped =
        vmax_s8(output_activation_min_vector,
                vmin_s8(output_activation_max_vector, vqmovn_s16(s)));
    vst1_s8(output_data + i, clamped);
  }
#endif // NEON
  if (i < size) {
    // Process broadcast scalar.
    // (Scalar tail: again hoist the input1 rescaling out of the loop.)
    const int32 input1_val = params.input1_offset + input1_data;
    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32 scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    for (; i < size; ++i) {
      const int32 input2_val = params.input2_offset + input2_data[i];
      const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
      const int32 scaled_input2_val =
          MultiplyByQuantizedMultiplierSmallerThanOneExp(
              shifted_input2_val, params.input2_multiplier,
              params.input2_shift);
      const int32 raw_sum = scaled_input1_val + scaled_input2_val;
      const int32 raw_output =
          MultiplyByQuantizedMultiplierSmallerThanOneExp(
              raw_sum, params.output_multiplier, params.output_shift) +
          params.output_offset;
      const int32 clamped_output =
          std::min(params.quantized_activation_max,
                   std::max(params.quantized_activation_min, raw_output));
      output_data[i] = static_cast<int8>(clamped_output);
    }
  }
}
// Non-broadcast int8 Add: all three shapes must match elementwise, so the op
// reduces to one flat elementwise pass.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int8* input1_data,
                const RuntimeShape& input2_shape, const int8* input2_data,
                const RuntimeShape& output_shape, int8* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  ruy::profiler::ScopeLabel scope_label("AddInt8/8bit");
  // MatchingElementsSize also checks that the three shapes agree.
  const int num_elements =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  // Offsets must fit the widened arithmetic in AddElementwise.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  AddElementwise(num_elements, params, input1_data, input2_data, output_data);
}
// Routes a broadcasting int8 Add to the right kernel: the optimized five-fold
// broadcast path when the broadcast shape permits it, otherwise the generic
// (slow) reference implementation.
inline void BroadcastAddDispatch(const ArithmeticParams& params,
                                 const RuntimeShape& input1_shape,
                                 const int8* input1_data,
                                 const RuntimeShape& input2_shape,
                                 const int8* input2_data,
                                 const RuntimeShape& output_shape,
                                 int8* output_data) {
  if (params.broadcast_category != BroadcastableOpCategory::kGenericBroadcast) {
    // Five-fold-compatible broadcast: reuse the fast elementwise and
    // scalar-broadcast inner loops defined above.
    optimized_ops::BinaryBroadcastFiveFold(
        params, input1_shape, input1_data, input2_shape, input2_data,
        output_shape, output_data, AddElementwise, AddScalarBroadcast);
    return;
  }
  reference_integer_ops::BroadcastAdd4DSlow(params, input1_shape, input1_data,
                                            input2_shape, input2_data,
                                            output_shape, output_data);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_ADD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/add.h | C++ | apache-2.0 | 14,393 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_CONV_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/im2col_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
// Fixed-point per-channel-quantization convolution reference kernel.
// Per-output-channel quantized convolution: optionally materializes an im2col
// buffer, then performs a single int8 GEMM whose per-row multiplier/shift
// arrays implement the per-channel requantization.
inline void ConvPerChannel(
    const ConvParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    const RuntimeShape& im2col_shape, int8* im2col_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel scope_label("Conv/8bit");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int32 input_offset = params.input_offset;
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  // im2col is needed unless this is an undilated 1x1 unit-stride filter.
  const bool want_dilated_im2col = params.dilation_width_factor != 1 ||
                                   params.dilation_height_factor != 1;
  const bool want_im2col = params.stride_width != 1 ||
                           params.stride_height != 1 || filter_width != 1 ||
                           filter_height != 1;
  // Byte pattern of the input zero point, used to fill padding taps.
  const int8 input_zero_point = -input_offset;
  const uint8 zero_point_byte =
      *reinterpret_cast<const uint8*>(&input_zero_point);
  const int8* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  if (want_dilated_im2col) {
    TFLITE_DCHECK(im2col_data);
    optimized_ops::DilatedIm2col(params, zero_point_byte, input_shape,
                                 input_data, filter_shape, output_shape,
                                 im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (want_im2col) {
    TFLITE_DCHECK(im2col_data);
    optimized_ops::Im2col(params, filter_height, filter_width, zero_point_byte,
                          input_shape, input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    // 1x1 unit-stride: feed the input straight into the GEMM.
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }
  // GEMM dimensions: LHS = filter, RHS = (possibly im2col'd) input, one
  // destination column per output pixel.
  const int gemm_input_rows = gemm_input_shape->Dims(3);
  const int gemm_input_cols = FlatSizeSkipDim(*gemm_input_shape, 3);
  const int filter_rows = filter_shape.Dims(0);
  const int filter_cols = FlatSizeSkipDim(filter_shape, 0);
  const int output_rows = output_shape.Dims(3);
  // See b/79927784.
  // const int output_cols = FlatSizeSkipDim(output_shape, 3);
  const int output_cols =
      output_shape.Dims(0) * output_shape.Dims(1) * output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(output_cols, gemm_input_cols);
  TFLITE_DCHECK_EQ(filter_cols, gemm_input_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  cpu_backend_gemm::MatrixParams<int8> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;
  lhs_params.zero_point = 0;  // filter is symmetric-quantized
  cpu_backend_gemm::MatrixParams<int8> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = gemm_input_rows;
  rhs_params.cols = gemm_input_cols;
  rhs_params.zero_point = -input_offset;
  cpu_backend_gemm::MatrixParams<int8> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = output_rows;
  dst_params.cols = output_cols;
  dst_params.zero_point = params.output_offset;
  // Per-row multiplier/exponent arrays carry the per-channel requantization.
  cpu_backend_gemm::GemmParams<
      int32, int8,
      cpu_backend_gemm::QuantizationFlavor::kIntegerWithPerRowMultiplier>
      gemm_params;
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = params.quantized_activation_min;
  gemm_params.clamp_max = params.quantized_activation_max;
  gemm_params.multiplier_fixedpoint_perchannel = output_multiplier;
  gemm_params.multiplier_exponent_perchannel = output_shift;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/conv.h | C++ | apache-2.0 | 5,968 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_H_
#include <string.h>
#include <algorithm>
#include <vector>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_uint8_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
namespace depthwise_conv {
// Implementation of quantized DepthwiseConv
// Primary template, intentionally left empty: only the partial
// specializations below (selected on whether strided input access is allowed,
// a fixed input depth, and a fixed depth multiplier) provide a Run() method.
// Instantiating an unsupported combination therefore fails at compile time.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
struct QuantizedDepthwiseConvKernel {};
#ifdef USE_NEON
// Specialization: strided input allowed, fixed input depth 8, depth
// multiplier 2 -> 16 int32 accumulators (output channels) per output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 8, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    int8x8x2_t filter_s8;
    filter_s8.val[0] = vld1_s8(filter_ptr);
    filter_s8.val[1] = vld1_s8(filter_ptr + 8);
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vmovl_s8(filter_s8.val[i]);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4x2_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i);
        acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
      }
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      // Strided access: advance by the caller-provided increment.
      input_ptr += input_ptr_increment;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      // (each input channel feeds two consecutive output channels).
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[0].val[i] = vmlal_s16(acc[0].val[i], vget_low_s16(filter[i]),
                                  vget_low_s16(input_dup2.val[i]));
        acc[1].val[i] = vmlal_s16(acc[1].val[i], vget_high_s16(filter[i]),
                                  vget_high_s16(input_dup2.val[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
        vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Specialization: contiguous input only, fixed input depth 8, depth
// multiplier 1 -> 8 accumulators per output pixel; the main loop handles two
// output pixels per iteration, with a single-pixel tail loop.
template <>
struct QuantizedDepthwiseConvKernel<false, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    const int16x8_t filter = vmovl_s8(filter_s8);
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      int8x8_t input_s8[2];
      for (int i = 0; i < 2; i++) {
        input_s8[i] = vld1_s8(input_ptr + 8 * i);
      }
      input_ptr += 16;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vmovl_s8(input_s8[i]);
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), vget_low_s16(input[0]));
      acc[1] =
          vmlal_s16(acc[1], vget_high_s16(filter), vget_high_s16(input[0]));
      acc[2] = vmlal_s16(acc[2], vget_low_s16(filter), vget_low_s16(input[1]));
      acc[3] =
          vmlal_s16(acc[3], vget_high_s16(filter), vget_high_s16(input[1]));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle 1 output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[2];
      acc[0] = vld1q_s32(acc_buffer_ptr);
      acc[1] = vld1q_s32(acc_buffer_ptr + 4);
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), vget_low_s16(input));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(filter), vget_high_s16(input));
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc[0]);
      vst1q_s32(acc_buffer_ptr + 4, acc[1]);
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: contiguous input only, fixed input depth 4, depth
// multiplier 2 -> 8 accumulators per output pixel; two pixels per main-loop
// iteration plus a single-pixel tail.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    const int16x8_t filter = vmovl_s8(filter_s8);
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      // (each input channel feeds two consecutive output channels).
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] = vmlal_s16(acc[2 * i + 0], vget_low_s16(filter),
                                   vget_low_s16(input_dup2.val[i]));
        acc[2 * i + 1] = vmlal_s16(acc[2 * i + 1], vget_high_s16(filter),
                                   vget_high_s16(input_dup2.val[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      // (Only 4 valid bytes remain, so fill lanes one at a time.)
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
      input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      const int16x4x2_t input_dup2 = vzip_s16(input, input);
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], vget_low_s16(filter), input_dup2.val[0]);
      acc[1] = vmlal_s16(acc[1], vget_high_s16(filter), input_dup2.val[1]);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
// Specialization: contiguous input only, fixed input depth 2, depth
// multiplier 8 -> 16 accumulators per output pixel; two pixels per main-loop
// iteration plus a single-pixel tail. Broadcasting is done with
// vmlal_lane_s16 (one input lane against a whole filter vector).
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 8> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      const int8x8_t filter_s8 = vld1_s8(filter_ptr + 8 * i);
      filter[i] = vmovl_s8(filter_s8);
    }
    int outp = 0;
    // Handle two output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[8];
      for (int i = 0; i < 8; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      // (2 channels x 2 pixels = 4 valid bytes, set lane by lane.)
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
      input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 0);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 1);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 1);
      acc[4] = vmlal_lane_s16(acc[4], vget_low_s16(filter[0]), input, 2);
      acc[5] = vmlal_lane_s16(acc[5], vget_high_s16(filter[0]), input, 2);
      acc[6] = vmlal_lane_s16(acc[6], vget_low_s16(filter[1]), input, 3);
      acc[7] = vmlal_lane_s16(acc[7], vget_high_s16(filter[1]), input, 3);
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 8; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 32;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 0);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 1);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 1);
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Kernel specialization for fixed input depth 2 with depth multiplier 2:
// 4 output channels per pixel. Inputs are read contiguously;
// input_ptr_increment is not used here.
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters: the 4 filter values, widened once to int16.
    int8x8_t filter_s8 = vdup_n_s8(0);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
    filter_s8 = vset_lane_s8(filter_ptr[2], filter_s8, 2);
    filter_s8 = vset_lane_s8(filter_ptr[3], filter_s8, 3);
    const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
    int outp = 0;
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Duplicate the input values, 2-fold, so that each input channel
      // lines up with both of its per-channel filter values.
      const int16x8x2_t input_dup2 = vzipq_s16(input, input);
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input_dup2.val[0]));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input_dup2.val[0]));
      acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input_dup2.val[1]));
      acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input_dup2.val[1]));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Duplicate the input values, 2-fold
      const int16x4_t input_dup2 = vzip_s16(input, input).val[0];
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input_dup2);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
  }
};
// Kernel specialization for fixed input depth 2 with depth multiplier 1:
// 2 output channels per pixel. Inputs are read contiguously;
// input_ptr_increment is not used here.
template <>
struct QuantizedDepthwiseConvKernel<false, 2, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters, duplicated 2-fold ({f0, f1, f0, f1}) so that one
    // 4-lane multiply covers two consecutive output pixels.
    int8x8_t filter_s8 = vdup_n_s8(0);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 2);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 3);
    const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
    int outp = 0;
    // Handle 8 output pixels at a time.
    for (; outp <= num_output_pixels - 8; outp += 8) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      int8x8_t input_s8[2];
      for (int i = 0; i < 2; i++) {
        input_s8[i] = vld1_s8(input_ptr + 8 * i);
      }
      input_ptr += 16;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vmovl_s8(input_s8[i]);
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input[0]));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input[0]));
      acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input[1]));
      acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input[1]));
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate.
      acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input));
      acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input));
      // Store the accumulators back to acc_buffer.
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
      input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer.
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle 1 output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x2_t acc = vld1_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_ptr += 2;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate. Only the low half of the widened result is
      // meaningful; the high half operates on duplicated data and is dropped.
      acc = vget_low_s32(vmlal_s16(vcombine_s32(acc, acc), filter, input));
      // Store the accumulators back to acc_buffer.
      vst1_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
template <>
struct QuantizedDepthwiseConvKernel<false, 1, 2> {
static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
const int8* input_ptr, int16 input_offset,
int input_ptr_increment, const int8* filter_ptr,
int32* acc_buffer_ptr) {
// Load the filters.
int8x8_t filter_s8 = vdup_n_s8(0);
filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 2);
filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 3);
const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
int outp = 0;
// Handle 8 output pixels at a time.
for (; outp <= num_output_pixels - 8; outp += 8) {
// Load the accumulators from acc_buffer
int32x4_t acc[4];
for (int i = 0; i < 4; i++) {
acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
}
// Load the inputs, add input_offset.
const int8x8_t input_s8 = vld1_s8(input_ptr);
input_ptr += 8;
const int16x8_t input_s16 = vmovl_s8(input_s8);
const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
// Duplicate the input values, 2-fold
const int16x8x2_t input_dup2 = vzipq_s16(input, input);
// Multiply-accumulate
acc[0] = vmlal_s16(acc[0], filter, vget_low_s16(input_dup2.val[0]));
acc[1] = vmlal_s16(acc[1], filter, vget_high_s16(input_dup2.val[0]));
acc[2] = vmlal_s16(acc[2], filter, vget_low_s16(input_dup2.val[1]));
acc[3] = vmlal_s16(acc[3], filter, vget_high_s16(input_dup2.val[1]));
// Store the accumulators back to acc_buffer
for (int i = 0; i < 4; i++) {
vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
}
acc_buffer_ptr += 16;
}
// Handle one output pixel at a time.
for (; outp < num_output_pixels; outp++) {
// Load the accumulators from acc_buffer
int32x2_t acc = vld1_s32(acc_buffer_ptr);
// Load the inputs, add input_offset.
const uint32 input = *input_ptr++ + input_offset;
// Multiply-accumulate
acc = vget_low_s32(vmlal_n_s16(vcombine_s32(acc, acc), filter, input));
// Store the accumulators back to acc_buffer
vst1_s32(acc_buffer_ptr, acc);
acc_buffer_ptr += 2;
}
}
};
template <>
struct QuantizedDepthwiseConvKernel<false, 1, 4> {
static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
const int8* input_ptr, int16 input_offset,
int input_ptr_increment, const int8* filter_ptr,
int32* acc_buffer_ptr) {
// Load the filters.
int8x8_t filter_s8 = vdup_n_s8(0);
filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
filter_s8 = vset_lane_s8(filter_ptr[2], filter_s8, 2);
filter_s8 = vset_lane_s8(filter_ptr[3], filter_s8, 3);
const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
int outp = 0;
// Handle 8 output pixels at a time.
for (; outp <= num_output_pixels - 8; outp += 8) {
// Load the accumulators from acc_buffer
int32x4_t acc[8];
for (int i = 0; i < 8; i++) {
acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
}
// Load the inputs, add input_offset.
int8x8_t input_s8 = vld1_s8(input_ptr);
input_ptr += 8;
const int16x8_t input_s16 = vmovl_s8(input_s8);
const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
// Multiply-accumulate
acc[0] = vmlal_lane_s16(acc[0], filter, vget_low_s16(input), 0);
acc[1] = vmlal_lane_s16(acc[1], filter, vget_low_s16(input), 1);
acc[2] = vmlal_lane_s16(acc[2], filter, vget_low_s16(input), 2);
acc[3] = vmlal_lane_s16(acc[3], filter, vget_low_s16(input), 3);
acc[4] = vmlal_lane_s16(acc[4], filter, vget_high_s16(input), 0);
acc[5] = vmlal_lane_s16(acc[5], filter, vget_high_s16(input), 1);
acc[6] = vmlal_lane_s16(acc[6], filter, vget_high_s16(input), 2);
acc[7] = vmlal_lane_s16(acc[7], filter, vget_high_s16(input), 3);
// Store the accumulators back to acc_buffer
for (int i = 0; i < 8; i++) {
vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
}
acc_buffer_ptr += 32;
}
// Handle 4 output pixels at a time.
for (; outp <= num_output_pixels - 4; outp += 4) {
// Load the accumulators from acc_buffer
int32x4_t acc[4];
for (int i = 0; i < 4; i++) {
acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
}
// Load the inputs, add input_offset.
int8x8_t input_s8 = vdup_n_s8(0);
input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
input_ptr += 4;
const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
// Multiply-accumulate
acc[0] = vmlal_lane_s16(acc[0], filter, input, 0);
acc[1] = vmlal_lane_s16(acc[1], filter, input, 1);
acc[2] = vmlal_lane_s16(acc[2], filter, input, 2);
acc[3] = vmlal_lane_s16(acc[3], filter, input, 3);
// Store the accumulators back to acc_buffer
for (int i = 0; i < 4; i++) {
vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
}
acc_buffer_ptr += 16;
}
// Handle one output pixel at a time.
for (; outp < num_output_pixels; outp++) {
// Load the accumulators from acc_buffer
int32x4_t acc = vld1q_s32(acc_buffer_ptr);
// Load the inputs, add input_offset.
const uint32 input = *input_ptr++ + input_offset;
// Multiply-accumulate
acc = vmlal_n_s16(acc, filter, input);
// Store the accumulators back to acc_buffer
vst1q_s32(acc_buffer_ptr, acc);
acc_buffer_ptr += 4;
}
}
};
// Kernel specialization for fixed input depth 4 with depth multiplier 1:
// 4 output channels per pixel. Inputs are read contiguously;
// input_ptr_increment is not used here.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the 4 filter values, widened once to int16.
    int8x8_t filter_s8 = vdup_n_s8(0);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
    filter_s8 = vset_lane_s8(filter_ptr[2], filter_s8, 2);
    filter_s8 = vset_lane_s8(filter_ptr[3], filter_s8, 3);
    const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
    int outp = 0;
    // Handle 4 output pixels at a time.
    for (; outp <= num_output_pixels - 4; outp += 4) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. 16 input values = 4 pixels x
      // depth 4, one pixel per 4-lane half below.
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        const int8x8_t input_s8 = vld1_s8(input_ptr + 8 * i);
        const int16x8_t input_s16 = vmovl_s8(input_s8);
        input[i] = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      }
      input_ptr += 16;
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] =
            vmlal_s16(acc[2 * i + 0], filter, vget_low_s16(input[i]));
        acc[2 * i + 1] =
            vmlal_s16(acc[2 * i + 1], filter, vget_high_s16(input[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc;
      acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
      input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
  }
};
// Kernel specialization for fixed input depth 4 with depth multiplier 4:
// 16 output channels per pixel. Inputs are read contiguously;
// input_ptr_increment is not used here.
template <>
struct QuantizedDepthwiseConvKernel<false, 4, 4> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters: 16 int8 values widened once to int16, reused for
    // every output pixel.
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      const int8x8_t filter_s8 = vld1_s8(filter_ptr + 8 * i);
      filter[i] = vmovl_s8(filter_s8);
    }
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[8];
      for (int i = 0; i < 8; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset. Low half = pixel 0's 4 input
      // channels, high half = pixel 1's; each lane feeds one filter half.
      int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += 8;
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Multiply-accumulate
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]),
                              vget_low_s16(input), 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]),
                              vget_low_s16(input), 1);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]),
                              vget_low_s16(input), 2);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]),
                              vget_low_s16(input), 3);
      acc[4] = vmlal_lane_s16(acc[4], vget_low_s16(filter[0]),
                              vget_high_s16(input), 0);
      acc[5] = vmlal_lane_s16(acc[5], vget_high_s16(filter[0]),
                              vget_high_s16(input), 1);
      acc[6] = vmlal_lane_s16(acc[6], vget_low_s16(filter[1]),
                              vget_high_s16(input), 2);
      acc[7] = vmlal_lane_s16(acc[7], vget_high_s16(filter[1]),
                              vget_high_s16(input), 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 8; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 32;
    }
    // Handle one output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
      input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
      input_ptr += 4;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc[0] = vmlal_lane_s16(acc[0], vget_low_s16(filter[0]), input, 0);
      acc[1] = vmlal_lane_s16(acc[1], vget_high_s16(filter[0]), input, 1);
      acc[2] = vmlal_lane_s16(acc[2], vget_low_s16(filter[1]), input, 2);
      acc[3] = vmlal_lane_s16(acc[3], vget_high_s16(filter[1]), input, 3);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Kernel specialization for runtime input depth with depth multiplier 3.
// Supports strided access: input_ptr advances by input_ptr_increment per
// output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 3> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // We will have to duplicate bytes in a NEON register, 3-fold.
    // We will do that by register-level table-look-up using VTBL instructions.
    // Here we prepare the registers containing the table-lookup indices.
    // Together the three rows spread input bytes 0..7 across 24 slots:
    // {0,0,0,1,1,1,...,7,7,7}.
    static const int8 dup3_indices_array[3][8] = {{0, 0, 0, 1, 1, 1, 2, 2},
                                                  {2, 3, 3, 3, 4, 4, 4, 5},
                                                  {5, 5, 6, 6, 6, 7, 7, 7}};
    int8x8_t dup3_indices[3];
    for (int i = 0; i < 3; i++) {
      dup3_indices[i] = vld1_s8(dup3_indices_array[i]);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const int8* local_filter_ptr = filter_ptr;
      const int8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 8 input channels at a time.
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters.
        int16x8_t filter[3];
        int8x8x3_t filter_s8;
        filter_s8.val[0] = vld1_s8(local_filter_ptr);
        filter_s8.val[1] = vld1_s8(local_filter_ptr + 8);
        filter_s8.val[2] = vld1_s8(local_filter_ptr + 16);
        local_filter_ptr += 24;
        for (int i = 0; i < 3; i++) {
          filter[i] = vmovl_s8(filter_s8.val[i]);
        }
        // Load the inputs, duplicate 3-fold, add input_offset.
        const int8x8_t input_s8 = vld1_s8(local_input_ptr);
        local_input_ptr += 8;
        int8x8_t input_s8_dup3[3];
        for (int i = 0; i < 3; i++) {
          input_s8_dup3[i] = vtbl1_s8(input_s8, dup3_indices[i]);
        }
        int16x8_t input_dup3[3];
        for (int i = 0; i < 3; i++) {
          const int16x8_t input_s16_dup3 = vmovl_s8(input_s8_dup3[i]);
          input_dup3[i] = vaddq_s16(input_s16_dup3, vdupq_n_s16(input_offset));
        }
        // Load the accumulators from acc_buffer. Note the interleaved
        // layout: acc[i].val[j] covers acc_buffer[4*i + 8*j .. 4*i + 8*j + 3],
        // matching the low/high vector halves used in the MLA loop below.
        int32x4x3_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i);
          acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
          acc[i].val[2] = vld1q_s32(acc_buffer_ptr + 4 * i + 16);
        }
        // Multiply-accumulate
        for (int j = 0; j < 3; j++) {
          acc[0].val[j] = vmlal_s16(acc[0].val[j], vget_low_s16(input_dup3[j]),
                                    vget_low_s16(filter[j]));
          acc[1].val[j] = vmlal_s16(acc[1].val[j], vget_high_s16(input_dup3[j]),
                                    vget_high_s16(filter[j]));
        }
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 16, acc[i].val[2]);
        }
        acc_buffer_ptr += 24;
      }
      // Handle one input channel at a time.
      for (; ic < input_depth; ic++) {
        const int16 input_val = *local_input_ptr++ + input_offset;
        for (int i = 0; i < 3; i++) {
          *acc_buffer_ptr++ +=
              static_cast<int32>(local_filter_ptr[i]) * input_val;
        }
        local_filter_ptr += 3;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Kernel specialization for runtime input depth with depth multiplier 2.
// Supports strided access: input_ptr advances by input_ptr_increment per
// output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 2> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const int8* local_filter_ptr = filter_ptr;
      const int8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 8 input channels at a time.
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters.
        int16x8_t filter[2];
        int8x8x2_t filter_s8;
        filter_s8.val[0] = vld1_s8(local_filter_ptr);
        filter_s8.val[1] = vld1_s8(local_filter_ptr + 8);
        local_filter_ptr += 16;
        for (int i = 0; i < 2; i++) {
          filter[i] = vmovl_s8(filter_s8.val[i]);
        }
        // Load the inputs, add input_offset, duplicate 2-fold (zip with
        // itself) so each input channel lines up with both of its filters.
        const int8x8_t input_s8 = vld1_s8(local_input_ptr);
        local_input_ptr += 8;
        const int16x8_t input_s16 = vmovl_s8(input_s8);
        const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
        const int16x8x2_t input_dup2 = vzipq_s16(input, input);
        // Load the accumulators from acc_buffer.
        int32x4x2_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i].val[0] = vld1q_s32(acc_buffer_ptr + 4 * i);
          acc[i].val[1] = vld1q_s32(acc_buffer_ptr + 4 * i + 8);
        }
        // Multiply-accumulate.
        for (int j = 0; j < 2; j++) {
          acc[0].val[j] = vmlal_s16(acc[0].val[j], vget_low_s16(filter[j]),
                                    vget_low_s16(input_dup2.val[j]));
          acc[1].val[j] = vmlal_s16(acc[1].val[j], vget_high_s16(filter[j]),
                                    vget_high_s16(input_dup2.val[j]));
        }
        // Store the accumulators back to acc_buffer.
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i].val[0]);
          vst1q_s32(acc_buffer_ptr + 4 * i + 8, acc[i].val[1]);
        }
        acc_buffer_ptr += 16;
      }
      // Handle one input channel at a time.
      for (; ic < input_depth; ic++) {
        // Load the inputs.
        const int16 input_val = *local_input_ptr++ + input_offset;
        for (int i = 0; i < 2; i++) {
          *acc_buffer_ptr++ +=
              static_cast<int32>(local_filter_ptr[i]) * input_val;
        }
        local_filter_ptr += 2;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Kernel specialization for runtime input depth with depth multiplier 1.
// Supports strided access: input_ptr advances by input_ptr_increment per
// output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 0, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      const int8* local_filter_ptr = filter_ptr;
      const int8* local_input_ptr = input_ptr;
      int ic = 0;
      // Handle 16 input channels at a time.
      for (; ic <= input_depth - 16; ic += 16) {
        // Load the filters.
        int8x8_t filter_s8_0 = vld1_s8(local_filter_ptr + 8 * 0);
        int8x8_t filter_s8_1 = vld1_s8(local_filter_ptr + 8 * 1);
        local_filter_ptr += 16;
        int16x8_t filter_0 = vmovl_s8(filter_s8_0);
        int16x8_t filter_1 = vmovl_s8(filter_s8_1);
        // Load the inputs, add input_offset.
        int8x8_t input_s8_0 = vld1_s8(local_input_ptr + 8 * 0);
        int8x8_t input_s8_1 = vld1_s8(local_input_ptr + 8 * 1);
        local_input_ptr += 16;
        int16x8_t input_0 = vmovl_s8(input_s8_0);
        int16x8_t input_1 = vmovl_s8(input_s8_1);
        input_0 = vaddq_s16(input_0, vdupq_n_s16(input_offset));
        input_1 = vaddq_s16(input_1, vdupq_n_s16(input_offset));
        // Load the accumulators from acc_buffer
        int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
        int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
        int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
        int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
        // Multiply-accumulate.
        acc_0 = vmlal_s16(acc_0, vget_low_s16(input_0), vget_low_s16(filter_0));
        acc_1 =
            vmlal_s16(acc_1, vget_high_s16(input_0), vget_high_s16(filter_0));
        acc_2 = vmlal_s16(acc_2, vget_low_s16(input_1), vget_low_s16(filter_1));
        acc_3 =
            vmlal_s16(acc_3, vget_high_s16(input_1), vget_high_s16(filter_1));
        // Store the accumulators back to acc_buffer
        vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
        vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
        vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
        vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
        acc_buffer_ptr += 16;
      }
      // Handle 8 input channels at a time.
      for (; ic <= input_depth - 8; ic += 8) {
        // Load the filters.
        const int8x8_t filter_s8 = vld1_s8(local_filter_ptr);
        local_filter_ptr += 8;
        const int16x8_t filter = vmovl_s8(filter_s8);
        // Load the inputs, add input_offset.
        const int8x8_t input_s8 = vld1_s8(local_input_ptr);
        local_input_ptr += 8;
        const int16x8_t input_s16 = vmovl_s8(input_s8);
        const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
        // Load the accumulators from acc_buffer
        int32x4_t acc[2];
        for (int i = 0; i < 2; i++) {
          acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
        }
        // Multiply-accumulate
        acc[0] = vmlal_s16(acc[0], vget_low_s16(input), vget_low_s16(filter));
        acc[1] = vmlal_s16(acc[1], vget_high_s16(input), vget_high_s16(filter));
        // Store the accumulators back to acc_buffer
        for (int i = 0; i < 2; i++) {
          vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
        }
        acc_buffer_ptr += 8;
      }
      // Handle one input channel at a time (scalar tail).
      for (; ic < input_depth; ic++) {
        const int16 input_val = *local_input_ptr++ + input_offset;
        const int16 filter_val = *local_filter_ptr++;
        *acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
      }
      input_ptr += input_ptr_increment;
    }
  }
};
// Kernel specialization for fixed input depth 16 with depth multiplier 1.
// Supports strided access: input_ptr advances by input_ptr_increment per
// output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 16, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the 16 filter values, widened once to int16, reused for every
    // output pixel.
    int8x8_t filter_s8[2];
    for (int i = 0; i < 2; i++) {
      filter_s8[i] = vld1_s8(filter_ptr + 8 * i);
    }
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vmovl_s8(filter_s8[i]);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      int8x8_t input_s8[2];
      for (int i = 0; i < 2; i++) {
        input_s8[i] = vld1_s8(input_ptr + 8 * i);
      }
      input_ptr += input_ptr_increment;
      int16x8_t input[2];
      for (int i = 0; i < 2; i++) {
        input[i] = vmovl_s8(input_s8[i]);
      }
      for (int i = 0; i < 2; i++) {
        input[i] = vaddq_s16(input[i], vdupq_n_s16(input_offset));
      }
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] = vmlal_s16(acc[2 * i + 0], vget_low_s16(input[i]),
                                   vget_low_s16(filter[i]));
        acc[2 * i + 1] = vmlal_s16(acc[2 * i + 1], vget_high_s16(input[i]),
                                   vget_high_s16(filter[i]));
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Kernel specialization for fixed input depth 8 with depth multiplier 1.
// Supports strided access: input_ptr advances by input_ptr_increment per
// output pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 8, 1> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the 8 filter values, widened once to int16.
    const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    const int16x8_t filter = vmovl_s8(filter_s8);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      const int8x8_t input_s8 = vld1_s8(input_ptr);
      const int16x8_t input_s16 = vmovl_s8(input_s8);
      const int16x8_t input = vaddq_s16(input_s16, vdupq_n_s16(input_offset));
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      acc[0] = vmlal_s16(acc[0], vget_low_s16(input), vget_low_s16(filter));
      acc[1] = vmlal_s16(acc[1], vget_high_s16(input), vget_high_s16(filter));
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
      input_ptr += input_ptr_increment;
    }
  }
};
// Kernel specialization for fixed input depth 1 with depth multiplier 16:
// one scalar input per pixel fans out to 16 output channels. Supports
// strided access: input_ptr advances by input_ptr_increment per pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 16> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the 16 filter values, widened once to int16.
    int8x8_t filter_s8[2];
    for (int i = 0; i < 2; i++) {
      filter_s8[i] = vld1_s8(filter_ptr + 8 * i);
    }
    int16x8_t filter[2];
    for (int i = 0; i < 2; i++) {
      filter[i] = vmovl_s8(filter_s8[i]);
    }
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      int8 input_s8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_s8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc[4];
      for (int i = 0; i < 4; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate: broadcast the single input against all filters.
      for (int i = 0; i < 2; i++) {
        acc[2 * i + 0] =
            vmlal_n_s16(acc[2 * i + 0], vget_low_s16(filter[i]), input);
        acc[2 * i + 1] =
            vmlal_n_s16(acc[2 * i + 1], vget_high_s16(filter[i]), input);
      }
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 4; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 16;
    }
  }
};
// Kernel specialization for fixed input depth 1 with depth multiplier 32:
// one scalar input per pixel fans out to 32 output channels. Supports
// strided access: input_ptr advances by input_ptr_increment per pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 32> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the 32 filter values, widened once to int16.
    int8x8_t filter_s8_0 = vld1_s8(filter_ptr + 8 * 0);
    int8x8_t filter_s8_1 = vld1_s8(filter_ptr + 8 * 1);
    int8x8_t filter_s8_2 = vld1_s8(filter_ptr + 8 * 2);
    int8x8_t filter_s8_3 = vld1_s8(filter_ptr + 8 * 3);
    int16x8_t filter_0 = vmovl_s8(filter_s8_0);
    int16x8_t filter_1 = vmovl_s8(filter_s8_1);
    int16x8_t filter_2 = vmovl_s8(filter_s8_2);
    int16x8_t filter_3 = vmovl_s8(filter_s8_3);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      int8 input_s8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_s8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
      int32x4_t acc_4 = vld1q_s32(acc_buffer_ptr + 4 * 4);
      int32x4_t acc_5 = vld1q_s32(acc_buffer_ptr + 4 * 5);
      int32x4_t acc_6 = vld1q_s32(acc_buffer_ptr + 4 * 6);
      int32x4_t acc_7 = vld1q_s32(acc_buffer_ptr + 4 * 7);
      // Multiply-accumulate: broadcast the single input against all filters.
      acc_0 = vmlal_n_s16(acc_0, vget_low_s16(filter_0), input);
      acc_1 = vmlal_n_s16(acc_1, vget_high_s16(filter_0), input);
      acc_2 = vmlal_n_s16(acc_2, vget_low_s16(filter_1), input);
      acc_3 = vmlal_n_s16(acc_3, vget_high_s16(filter_1), input);
      acc_4 = vmlal_n_s16(acc_4, vget_low_s16(filter_2), input);
      acc_5 = vmlal_n_s16(acc_5, vget_high_s16(filter_2), input);
      acc_6 = vmlal_n_s16(acc_6, vget_low_s16(filter_3), input);
      acc_7 = vmlal_n_s16(acc_7, vget_high_s16(filter_3), input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_s32(acc_buffer_ptr + 4 * 4, acc_4);
      vst1q_s32(acc_buffer_ptr + 4 * 5, acc_5);
      vst1q_s32(acc_buffer_ptr + 4 * 6, acc_6);
      vst1q_s32(acc_buffer_ptr + 4 * 7, acc_7);
      acc_buffer_ptr += 32;
    }
  }
};
// Kernel specialization for fixed input depth 1 with depth multiplier 20:
// one scalar input per pixel fans out to 20 output channels. Supports
// strided access: input_ptr advances by input_ptr_increment per pixel.
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 20> {
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    // NEON wants to load 8 bytes at a time, but 20 is not divisible by 8.
    // We load the first 16 bytes into filter_s8_{0,1} as usual.
    // Then we load the 8 last bytes into filter_s8_x (x for 'extra').
    // This is redundant: the first 4 bytes of filter_s8_x are the same
    // as the last 4 bytes of filter_s8_1. Only the high half of filter_x
    // (filter values 16..19) is consumed below.
    int8x8_t filter_s8_0 = vld1_s8(filter_ptr + 8 * 0);
    int8x8_t filter_s8_1 = vld1_s8(filter_ptr + 8 * 1);
    int8x8_t filter_s8_x = vld1_s8(filter_ptr + 8 * 1 + 4);
    int16x8_t filter_0 = vmovl_s8(filter_s8_0);
    int16x8_t filter_1 = vmovl_s8(filter_s8_1);
    int16x8_t filter_x = vmovl_s8(filter_s8_x);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      int8 input_s8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_s8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      int32x4_t acc_3 = vld1q_s32(acc_buffer_ptr + 4 * 3);
      int32x4_t acc_4 = vld1q_s32(acc_buffer_ptr + 4 * 4);
      // Multiply-accumulate: broadcast the single input against all filters.
      acc_0 = vmlal_n_s16(acc_0, vget_low_s16(filter_0), input);
      acc_1 = vmlal_n_s16(acc_1, vget_high_s16(filter_0), input);
      acc_2 = vmlal_n_s16(acc_2, vget_low_s16(filter_1), input);
      acc_3 = vmlal_n_s16(acc_3, vget_high_s16(filter_1), input);
      acc_4 = vmlal_n_s16(acc_4, vget_high_s16(filter_x), input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      vst1q_s32(acc_buffer_ptr + 4 * 3, acc_3);
      vst1q_s32(acc_buffer_ptr + 4 * 4, acc_4);
      acc_buffer_ptr += 20;
    }
  }
};
template <>
struct QuantizedDepthwiseConvKernel<true, 1, 8> {
  // Specialization for input_depth == 1, depth_multiplier == 8, strides
  // allowed: each input value feeds 8 accumulators per output pixel.
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    const int8x8_t filter_s8 = vld1_s8(filter_ptr);
    // Sign-extend to 16 bits once, outside the pixel loop.
    const int16x8_t filter = vmovl_s8(filter_s8);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      int8 input_s8 = *input_ptr;
      input_ptr += input_ptr_increment;
      int16 input = static_cast<int16>(input_s8 + input_offset);
      // Load the accumulators from acc_buffer
      int32x4_t acc[2];
      for (int i = 0; i < 2; i++) {
        acc[i] = vld1q_s32(acc_buffer_ptr + 4 * i);
      }
      // Multiply-accumulate
      acc[0] = vmlal_n_s16(acc[0], vget_low_s16(filter), input);
      acc[1] = vmlal_n_s16(acc[1], vget_high_s16(filter), input);
      // Store the accumulators back to acc_buffer
      for (int i = 0; i < 2; i++) {
        vst1q_s32(acc_buffer_ptr + 4 * i, acc[i]);
      }
      acc_buffer_ptr += 8;
    }
  }
};
template <>
struct QuantizedDepthwiseConvKernel<true, 2, 1> {
  // Specialization for input_depth == 2, depth_multiplier == 1, strides
  // allowed: 2 accumulators per output pixel.
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    // The 2 filter values are duplicated into lanes 2..3 so that a single
    // vmlal_s16 can process two output pixels at once.
    int8x8_t filter_s8 = vdup_n_s8(0);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 2);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 3);
    const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
    int outp = 0;
    // Handle 2 output pixels at a time.
    for (; outp <= num_output_pixels - 2; outp += 2) {
      // Load the accumulators from acc_buffer.
      int32x4_t acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      // Each 16-bit lane insert grabs both int8 channels of one pixel at
      // once. NOTE(review): this assumes 16-bit loads from an int8 pointer
      // are acceptable on the target -- confirm for non-ARM NEON ports.
      int16x4_t input_s16 = vdup_n_s16(0);
      input_s16 = vset_lane_s16((reinterpret_cast<const int16*>(input_ptr))[0],
                                input_s16, 0);
      input_ptr += input_ptr_increment;
      input_s16 = vset_lane_s16((reinterpret_cast<const int16*>(input_ptr))[0],
                                input_s16, 1);
      input_ptr += input_ptr_increment;
      // Reinterpret the 4 packed int8 values and sign-extend them to 16 bit.
      input_s16 = vget_low_s16(vmovl_s8(vreinterpret_s8_s16(input_s16)));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer.
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle 1 output pixel at a time.
    for (; outp < num_output_pixels; outp++) {
      // Load the accumulators from acc_buffer.
      int32x2_t acc = vld1_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vdup_n_s8(0);
      input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
      input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
      input_ptr += input_ptr_increment;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate.
      // The high half of the widened product is discarded by vget_low_s32.
      acc = vget_low_s32(vmlal_s16(vcombine_s32(acc, acc), filter, input));
      // Store the accumulators back to acc_buffer.
      vst1_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 2;
    }
  }
};
template <>
struct QuantizedDepthwiseConvKernel<true, 4, 1> {
  // Specialization for input_depth == 4, depth_multiplier == 1, strides
  // allowed: 4 accumulators per output pixel.
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // The epilogue below unconditionally processes one pixel, so bail out
    // early when there is no work at all.
    if (num_output_pixels <= 0) {
      return;
    }
    // Load the filters.
    int8x8_t filter_s8 = vdup_n_s8(0);
    filter_s8 = vset_lane_s8(filter_ptr[0], filter_s8, 0);
    filter_s8 = vset_lane_s8(filter_ptr[1], filter_s8, 1);
    filter_s8 = vset_lane_s8(filter_ptr[2], filter_s8, 2);
    filter_s8 = vset_lane_s8(filter_ptr[3], filter_s8, 3);
    const int16x4_t filter = vget_low_s16(vmovl_s8(filter_s8));
    int outp = 0;
    // Handle one output pixel at a time until second to the last pixel.
    // Second to the last because vld1_s8 reads eight input values while
    // only four are consumed, so the last pixel must not over-read past
    // the input row.
    for (; outp < num_output_pixels - 1; outp++) {
      // Load the accumulators from acc_buffer
      int32x4_t acc;
      acc = vld1q_s32(acc_buffer_ptr);
      // Load the inputs, add input_offset.
      int8x8_t input_s8 = vld1_s8(input_ptr);
      input_ptr += input_ptr_increment;
      const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
      const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
      // Multiply-accumulate
      acc = vmlal_s16(acc, filter, input);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr, acc);
      acc_buffer_ptr += 4;
    }
    // Handle the last output pixel with per-lane loads (no over-read).
    // Load the accumulators from acc_buffer
    int32x4_t acc;
    acc = vld1q_s32(acc_buffer_ptr);
    // Load the inputs, add input_offset.
    int8x8_t input_s8 = vdup_n_s8(0);
    input_s8 = vset_lane_s8(input_ptr[0], input_s8, 0);
    input_s8 = vset_lane_s8(input_ptr[1], input_s8, 1);
    input_s8 = vset_lane_s8(input_ptr[2], input_s8, 2);
    input_s8 = vset_lane_s8(input_ptr[3], input_s8, 3);
    const int16x4_t input_s16 = vget_low_s16(vmovl_s8(input_s8));
    const int16x4_t input = vadd_s16(input_s16, vdup_n_s16(input_offset));
    // Multiply-accumulate
    acc = vmlal_s16(acc, filter, input);
    // Store the accumulators back to acc_buffer
    vst1q_s32(acc_buffer_ptr, acc);
  }
};
template <>
struct QuantizedDepthwiseConvKernel<false, 12, 1> {
  // Specialization for input_depth == 12, depth_multiplier == 1, stride 1
  // only: 12 accumulators per output pixel.
  static void Run(int num_output_pixels, int input_depth, int depth_multiplier,
                  const int8* input_ptr, int16 input_offset,
                  int input_ptr_increment, const int8* filter_ptr,
                  int32* acc_buffer_ptr) {
    // Load the filters.
    // 12 is not a multiple of 8, so two overlapping 8-byte loads are used:
    // filter_s8_0 covers values 0..7 and filter_s8_1 covers values 4..11.
    int8x8_t filter_s8_0 = vld1_s8(filter_ptr);
    int8x8_t filter_s8_1 = vld1_s8(filter_ptr + 4);
    int16x8_t filter_s16_0 = vmovl_s8(filter_s8_0);
    int16x8_t filter_s16_1 = vmovl_s8(filter_s8_1);
    // filter_0 = values 0..3, filter_1 = values 4..7, filter_2 = values 8..11.
    int16x4_t filter_0 = vget_low_s16(filter_s16_0);
    int16x4_t filter_1 = vget_high_s16(filter_s16_0);
    int16x4_t filter_2 = vget_high_s16(filter_s16_1);
    // Handle one output pixel at a time.
    for (int outp = 0; outp < num_output_pixels; outp++) {
      // Load the inputs, add input_offset.
      // Same overlapping-load trick as for the filters: exactly 12 input
      // bytes are covered by the two loads.
      int8x8_t input_s8_0 = vld1_s8(input_ptr);
      int8x8_t input_s8_1 = vld1_s8(input_ptr + 4);
      input_ptr += input_ptr_increment;
      int16x8_t input_0 = vmovl_s8(input_s8_0);
      int16x8_t input_1 = vmovl_s8(input_s8_1);
      input_0 = vaddq_s16(input_0, vdupq_n_s16(input_offset));
      input_1 = vaddq_s16(input_1, vdupq_n_s16(input_offset));
      // Load the accumulators from acc_buffer
      int32x4_t acc_0 = vld1q_s32(acc_buffer_ptr + 4 * 0);
      int32x4_t acc_1 = vld1q_s32(acc_buffer_ptr + 4 * 1);
      int32x4_t acc_2 = vld1q_s32(acc_buffer_ptr + 4 * 2);
      // Multiply-accumulate
      acc_0 = vmlal_s16(acc_0, vget_low_s16(input_0), filter_0);
      acc_1 = vmlal_s16(acc_1, vget_high_s16(input_0), filter_1);
      acc_2 = vmlal_s16(acc_2, vget_high_s16(input_1), filter_2);
      // Store the accumulators back to acc_buffer
      vst1q_s32(acc_buffer_ptr + 4 * 0, acc_0);
      vst1q_s32(acc_buffer_ptr + 4 * 1, acc_1);
      vst1q_s32(acc_buffer_ptr + 4 * 2, acc_2);
      acc_buffer_ptr += 12;
    }
  }
};
#endif
// Accumulates the effect of one row of the filter, on a segment of one row
// of the output, accessing the corresponding one row of the input.
//
// The template parameters select a specialized QuantizedDepthwiseConvKernel:
// kFixedInputDepth / kFixedDepthMultiplier pin input_depth /
// depth_multiplier at compile time (0 means "runtime value"), and
// kAllowStrided must be true for stride != 1.
template <bool kAllowStrided, int kFixedInputDepth, int kFixedDepthMultiplier>
void QuantizedDepthwiseConvAccumRow(int stride, int dilation_factor,
                                    int input_depth, int input_width,
                                    const int8* input_data, int16 input_offset,
                                    int pad_width, int depth_multiplier,
                                    int filter_width, const int8* filter_data,
                                    int out_x_buffer_start,
                                    int out_x_buffer_end, int output_depth,
                                    int32* acc_buffer) {
  ruy::profiler::ScopeLabel label(__PRETTY_FUNCTION__);
  // Consistency check parameters. This is important in particular to ensure
  // that we keep the number of template instantiations minimal, so we don't
  // increase binary size unnecessarily.
  static_assert(kFixedDepthMultiplier || !kFixedInputDepth, "");
  static_assert(kFixedInputDepth || kAllowStrided, "");
  TFLITE_DCHECK(stride == 1 || kAllowStrided);
  if (kFixedInputDepth) {
    TFLITE_DCHECK_EQ(input_depth, kFixedInputDepth);
  }
  if (kFixedDepthMultiplier) {
    TFLITE_DCHECK_EQ(depth_multiplier, kFixedDepthMultiplier);
  }
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  const int input_ptr_increment = stride * input_depth;
  const int8* filter_base_ptr = filter_data;
  for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
    // For the current (filter_x, filter_y) point in the filter,
    // compute the boundaries of the corresponding output row segment.
    // These are ceil-divisions by the stride; strides 2 and 4 are spelled
    // out so the divisions are by compile-time constants.
    int out_x_loop_start_unclamped = 0;
    int out_x_loop_end_unclamped = 0;
    if (kAllowStrided) {
      if (stride == 2) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 1) / 2;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 1) / 2;
      } else if (stride == 4) {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + 3) / 4;
        out_x_loop_end_unclamped =
            (pad_width + input_width - dilation_factor * filter_x + 3) / 4;
      } else {
        out_x_loop_start_unclamped =
            (pad_width - dilation_factor * filter_x + stride - 1) / stride;
        out_x_loop_end_unclamped = (pad_width + input_width -
                                    dilation_factor * filter_x + stride - 1) /
                                   stride;
      }
    } else {
      out_x_loop_start_unclamped = pad_width - dilation_factor * filter_x;
      out_x_loop_end_unclamped =
          pad_width + input_width - dilation_factor * filter_x;
    }
    // The kernel will have to iterate on the segment of the
    // output row that starts at out_x_loop_start and out_x_loop_end.
    const int out_x_loop_start =
        std::max(out_x_buffer_start, out_x_loop_start_unclamped);
    const int out_x_loop_end =
        std::min(out_x_buffer_end, out_x_loop_end_unclamped);
    int32* acc_buffer_ptr =
        acc_buffer + (out_x_loop_start - out_x_buffer_start) * output_depth;
    const int in_x_origin =
        (out_x_loop_start * stride) - pad_width + dilation_factor * filter_x;
    const int8* input_ptr = input_data + in_x_origin * input_depth;
    const int num_output_pixels = out_x_loop_end - out_x_loop_start;
    // Delegate the inner loop over output pixels to the specialized kernel.
    QuantizedDepthwiseConvKernel<
        kAllowStrided, kFixedInputDepth,
        kFixedDepthMultiplier>::Run(num_output_pixels, input_depth,
                                    depth_multiplier, input_ptr, input_offset,
                                    input_ptr_increment, filter_base_ptr,
                                    acc_buffer_ptr);
    // Advance to this filter_x's next row of output-depth filter values.
    filter_base_ptr += output_depth;
  }
}
// generic fallback of DepthwiseConvAccumRow, portable, non-templatized.
inline void QuantizedDepthwiseConvAccumRowGeneric(
int stride, int dilation_factor, int input_depth, int input_width,
const int8* input_data, int16 input_offset, int pad_width,
int depth_multiplier, int filter_width, const int8* filter_data,
int out_x_buffer_start, int out_x_buffer_end, int output_depth,
int32* acc_buffer) {
ruy::profiler::ScopeLabel label("DepthwiseConvAccumRowGeneric (slow)");
const int8* filter_base_ptr = filter_data;
for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
const int out_x_loop_start = std::max(
out_x_buffer_start,
(pad_width - dilation_factor * filter_x + stride - 1) / stride);
const int out_x_loop_end = std::min(
out_x_buffer_end,
(pad_width + input_width - dilation_factor * filter_x + stride - 1) /
stride);
int32* acc_buffer_ptr =
acc_buffer + (out_x_loop_start - out_x_buffer_start) * output_depth;
const int in_x_origin =
(out_x_loop_start * stride) - pad_width + dilation_factor * filter_x;
const int8* input_ptr = input_data + in_x_origin * input_depth;
const int input_ptr_increment = (stride - 1) * input_depth;
for (int out_x = out_x_loop_start; out_x < out_x_loop_end; out_x++) {
const int8* filter_ptr = filter_base_ptr;
for (int ic = 0; ic < input_depth; ++ic) {
const int16 input_val = *input_ptr++ + input_offset;
for (int m = 0; m < depth_multiplier; m++) {
const int16 filter_val = *filter_ptr++;
*acc_buffer_ptr++ += static_cast<int32>(filter_val) * input_val;
}
}
input_ptr += input_ptr_increment;
}
filter_base_ptr += output_depth;
}
}
// Initializes the accumulator buffer with bias values.
// Writes num_output_pixels copies of bias_data[0..output_depth) into
// acc_buffer. Depths 1, 2, 4, 8 and 16 get unrolled NEON store loops; any
// leftover pixels (and all other depths) are handled by the memcpy loop at
// the bottom.
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
                                       const int32* bias_data,
                                       int32* acc_buffer) {
  int i = 0;
#ifdef USE_NEON
  if (output_depth == 1) {
    const int32x4_t b = vdupq_n_s32(bias_data[0]);
    // 16 pixels per iteration, then 4, remainder handled below.
    for (; i <= num_output_pixels - 16; i += 16) {
      vst1q_s32(acc_buffer + i + 0, b);
      vst1q_s32(acc_buffer + i + 4, b);
      vst1q_s32(acc_buffer + i + 8, b);
      vst1q_s32(acc_buffer + i + 12, b);
    }
    for (; i <= num_output_pixels - 4; i += 4) {
      vst1q_s32(acc_buffer + i, b);
    }
  } else if (output_depth == 2) {
    // b holds {bias0, bias1, bias0, bias1}: two pixels per vector.
    int32x4_t b = vdupq_n_s32(bias_data[0]);
    b = vsetq_lane_s32(bias_data[1], b, 1);
    b = vsetq_lane_s32(bias_data[1], b, 3);
    // 8 pixels per iteration, then 2; an odd trailing pixel falls through
    // to the memcpy loop below.
    for (; i <= num_output_pixels - 8; i += 8) {
      vst1q_s32(acc_buffer + 2 * i + 0, b);
      vst1q_s32(acc_buffer + 2 * i + 4, b);
      vst1q_s32(acc_buffer + 2 * i + 8, b);
      vst1q_s32(acc_buffer + 2 * i + 12, b);
    }
    for (; i <= num_output_pixels - 2; i += 2) {
      vst1q_s32(acc_buffer + 2 * i, b);
    }
  } else if (output_depth == 4) {
    // One vector per pixel; 4 pixels per unrolled iteration.
    const int32x4_t b = vld1q_s32(bias_data);
    for (; i <= num_output_pixels - 4; i += 4) {
      vst1q_s32(acc_buffer + 4 * i + 0, b);
      vst1q_s32(acc_buffer + 4 * i + 4, b);
      vst1q_s32(acc_buffer + 4 * i + 8, b);
      vst1q_s32(acc_buffer + 4 * i + 12, b);
    }
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 4 * i, b);
    }
  } else if (output_depth == 8) {
    const int32x4_t b0 = vld1q_s32(bias_data);
    const int32x4_t b1 = vld1q_s32(bias_data + 4);
    for (; i <= num_output_pixels - 2; i += 2) {
      vst1q_s32(acc_buffer + 8 * i + 0, b0);
      vst1q_s32(acc_buffer + 8 * i + 4, b1);
      vst1q_s32(acc_buffer + 8 * i + 8, b0);
      vst1q_s32(acc_buffer + 8 * i + 12, b1);
    }
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 8 * i + 0, b0);
      vst1q_s32(acc_buffer + 8 * i + 4, b1);
    }
  } else if (output_depth == 16) {
    const int32x4_t b0 = vld1q_s32(bias_data);
    const int32x4_t b1 = vld1q_s32(bias_data + 4);
    const int32x4_t b2 = vld1q_s32(bias_data + 8);
    const int32x4_t b3 = vld1q_s32(bias_data + 12);
    for (; i < num_output_pixels; i++) {
      vst1q_s32(acc_buffer + 16 * i + 0, b0);
      vst1q_s32(acc_buffer + 16 * i + 4, b1);
      vst1q_s32(acc_buffer + 16 * i + 8, b2);
      vst1q_s32(acc_buffer + 16 * i + 12, b3);
    }
  }
#endif
  // Generic path: also finishes any pixels the NEON loops left over.
  for (; i < num_output_pixels; i++) {
    memcpy(acc_buffer + i * output_depth, bias_data,
           sizeof(acc_buffer[0]) * output_depth);
  }
}
// General per-channel int8 depthwise convolution (used when no 3x3 fast
// path applies). Processes the [thread_start, thread_end) slice of either
// the batch dimension (thread_dim == 0) or the output-row dimension
// (thread_dim == 1). output_multiplier / output_shift are per-output-channel
// requantization parameters.
inline void DepthwiseConvGeneral(
    const DepthwiseParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    int thread_start, int thread_end, int thread_dim) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  const int32 input_offset = params.input_offset;
  const int32 output_offset = params.output_offset;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_rows = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  // Fixed-size stack accumulator. Each chunk of kOutputPixelsInAccBuffer
  // output pixels along x is fully accumulated (all filter rows) before
  // being requantized and stored.
  static const int kAccBufferMaxSize = 2048;
  int32 acc_buffer[kAccBufferMaxSize];
  TFLITE_DCHECK_GE(kAccBufferMaxSize, output_depth);
  const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
  TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
                   kAccBufferActualSize);
  TFLITE_DCHECK_LE(kAccBufferActualSize, kAccBufferMaxSize);
  TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  // row_accum_func will point to the core accumulation function to be used
  // for this DepthwiseConv op.
  using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
  row_accum_func_t row_accum_func = nullptr;
// Selects the first specialized kernel (in declaration order below) whose
// fixed input depth / depth multiplier / stride constraints match the op.
// FIXED_INPUT_DEPTH == 0 means "any input depth".
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH, \
                                        FIXED_DEPTH_MULTIPLIER)           \
  if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) &&          \
      (input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) &&     \
      depth_multiplier == FIXED_DEPTH_MULTIPLIER) {                       \
    row_accum_func =                                                      \
        QuantizedDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH,  \
                                       FIXED_DEPTH_MULTIPLIER>;           \
  }
#ifdef USE_NEON
  // We go over our list of kernels by decreasing order of preference
  // for the cases where multiple kernels could apply.
  // Start with the fastest kernels: AllowStrided=false, fixed input depth.
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 12, 1)
  // Next come the strided kernels: AllowStrided=true, fixed input depth.
  // They are a bit less efficient, but allow stride!=1.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 16, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 16)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
  // Finally, the kernels allowing a variable input depth,
  // these are the least efficient but most general kernels.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 3)
#endif  // USE_NEON
  // No matching fast kernel found, use slow fallback.
  if (!row_accum_func) {
    row_accum_func = QuantizedDepthwiseConvAccumRowGeneric;
  }
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
  const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
  const int input_batch_stride = input_height_stride * input_shape.Dims(1);
  const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
  // Now that we have determined row_accum_func, we can start work.
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = output_rows;
  int output_ptr_offset = 0;
  switch (thread_dim) {
    case 0:
      // Multithreading along the batch dimension.
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
      break;
    case 1:
      // Multithreading along the output-row dimension.
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, output_rows);
      row_start = thread_start;
      row_end = thread_end;
      output_ptr_offset = row_start * output_width * output_depth;
      break;
  }
  int8* output_ptr = output_data + output_ptr_offset;
  // When threading over rows, skip past the rows this thread does not own
  // at the end of each batch.
  int batch_step =
      (output_rows + row_start - row_end) * output_width * output_depth;
  for (int b = batch_start; b < batch_end; ++b) {
    for (int out_y = row_start; out_y < row_end; ++out_y) {
      const int in_y_origin = (out_y * stride_height) - pad_height;
      // Restrict the filter rows to those whose input row is in bounds.
      const int filter_y_start =
          std::max(0, (-in_y_origin + dilation_height_factor - 1) /
                          dilation_height_factor);
      const int filter_y_end =
          std::min(filter_height,
                   (input_height - in_y_origin + dilation_height_factor - 1) /
                       dilation_height_factor);
      for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
           out_x_buffer_start += kOutputPixelsInAccBuffer) {
        const int out_x_buffer_end = std::min(
            output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
        // We call a 'pixel' a group of activation that share all but the
        // 'depth'/'channel' coordinate. num_output_pixels is the number of
        // output pixels that we will accumulate in this loop iteration.
        const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
        // Initialize our local accumulator with the bias values, so we don't
        // have to add them later.
        DepthwiseConvInitAccBuffer(num_output_pixels, output_depth, bias_data,
                                   acc_buffer);
        // Accumulation loop. Most of the time should be spent in here.
        for (int filter_y = filter_y_start; filter_y < filter_y_end;
             ++filter_y) {
          const int in_y = in_y_origin + dilation_height_factor * filter_y;
          row_accum_func(
              stride_width, dilation_width_factor, input_depth, input_width,
              input_data + in_y * input_height_stride + b * input_batch_stride,
              input_offset, pad_width, depth_multiplier, filter_width,
              filter_data + filter_y * filter_height_stride, out_x_buffer_start,
              out_x_buffer_end, output_depth, acc_buffer);
        }
        // Finished accumulating int32 values. Now need to convert them to
        // the final 8bit form and store them.
        ruy::profiler::ScopeLabel label("downquantize+store");
        const int num_output_values = output_depth * num_output_pixels;
        optimized_ops::Quantize(output_multiplier, output_shift, output_depth,
                                num_output_values, output_offset,
                                output_activation_min, output_activation_max,
                                acc_buffer, output_ptr);
        output_ptr += num_output_values;
      }
    }
    output_ptr += batch_step;
  }
}
} // namespace depthwise_conv
// Dispatches a per-channel int8 depthwise conv to the fastest applicable
// kernel: the 3x3 dot-product path (aarch64 Android + clang with dotprod
// support), the generic 3x3 fast path (aarch64), or DepthwiseConvGeneral.
// NOTE(review): the 3x3 fast path below hardcodes kUpward rounding, so the
// kOutputRounding template argument does not reach it -- confirm intended.
template <DepthwiseConvOutputRounding kOutputRounding>
inline void DepthwiseConvWithRounding(
    const DepthwiseParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    int thread_start, int thread_end, int thread_dim,
    const CpuBackendContext& cpu_backend_context) {
  ruy::profiler::ScopeLabel label("DepthwiseConvInt8/8bit");
  const int depth_multiplier = params.depth_multiplier;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  TFLITE_DCHECK_GE(dilation_width_factor, 1);
  TFLITE_DCHECK_GE(dilation_height_factor, 1);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_depth = input_shape.Dims(3);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
#if defined(__ANDROID__) && defined(__clang__)
  // Runtime CPU feature detection for the ARMv8.2 dot-product extension.
  CpuFlags cpu_flags;
  GetCpuFlags(&cpu_flags);
  const bool has_dot_product_instructions = cpu_flags.neon_dotprod;
  // Dispatch to dot-product 3x3 kernels when supported.
  if (has_dot_product_instructions) {
    using optimized_ops::depthwise_conv::DotProduct3x3KernelType;
    DotProduct3x3KernelType kernel_type =
        optimized_ops::depthwise_conv::CategorizeDotProductKernel<
            optimized_ops::depthwise_conv::QuantizationType::kPerChannelInt8>(
            input_shape, filter_shape, output_shape, params, output_shift);
    if (kernel_type != DotProduct3x3KernelType::kNone) {
      ruy::profiler::ScopeLabel specialized_label(
          "DepthwiseConvInt8/8bit/3x3XDotProduct");
      // The dot-product kernel takes per-channel quantization through the
      // params struct rather than as separate arguments.
      DepthwiseParams params_copy = params;
      params_copy.output_shift_per_channel = output_shift;
      params_copy.output_multiplier_per_channel = output_multiplier;
      optimized_ops::depthwise_conv::DepthwiseConvDotProduct3x3PerChannel<
          DepthwiseConvImplementation::kUseNeon3x3DotProduct>(
          params_copy, input_shape, input_data, filter_shape, filter_data,
          bias_shape, bias_data, output_shape, output_data, thread_start,
          thread_end, thread_dim);
      return;
    }
  }
#endif
  // Dispatch to non-dot-product 3x3 kernels when supported.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  // Call kernel optimized for depthwise convolutions using 3x3 filters if
  // parameters are supported.
  if (optimized_ops::depthwise_conv::Fast3x3FilterKernelSupported<
          optimized_ops::depthwise_conv::QuantizationType::kPerChannelInt8>(
          input_shape, filter_shape, stride_width, stride_height,
          dilation_width_factor, dilation_height_factor, pad_width, pad_height,
          depth_multiplier, output_shape, 0, output_shift)) {
    ruy::profiler::ScopeLabel specialized_label("DepthwiseConvInt8/8bit/3x3");
    optimized_ops::depthwise_conv::DepthwiseConv3x3FilterPerChannel<
        DepthwiseConvOutputRounding::kUpward>(
        params, output_multiplier, output_shift, input_shape, input_data,
        filter_shape, filter_data, bias_shape, bias_data, output_shape,
        output_data, thread_start, thread_end, thread_dim);
    return;
  }
#endif
  // Fallback: general kernel, any filter size / stride / dilation.
  ruy::profiler::ScopeLabel specialized_label("DepthwiseConvInt8/8bit/General");
  depthwise_conv::DepthwiseConvGeneral(
      params, output_multiplier, output_shift, input_shape, input_data,
      filter_shape, filter_data, bias_shape, bias_data, output_shape,
      output_data, thread_start, thread_end, thread_dim);
}
// Single-threaded-slice entry point: forwards to DepthwiseConvWithRounding
// with away-from-zero output rounding.
inline void DepthwiseConvImpl(
    const DepthwiseParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    int thread_start, int thread_end, int thread_dim,
    const CpuBackendContext& cpu_backend_context) {
  DepthwiseConvWithRounding<DepthwiseConvOutputRounding::kAwayFromZero>(
      params, output_multiplier, output_shift, input_shape, input_data,
      filter_shape, filter_data, bias_shape, bias_data, output_shape,
      output_data, thread_start, thread_end, thread_dim, cpu_backend_context);
}
// Threadpool task running DepthwiseConvImpl on one thread's slice of the
// output (a [thread_start, thread_end) range of either the batch or the
// output-row dimension, selected by thread_dim).
//
// T is the activation/weight element type (int8 here) and TS the bias type
// (int32). The task only borrows its arguments by reference/pointer; the
// caller must keep them alive until cpu_backend_threadpool::Execute returns.
template <typename T, typename TS>
struct DepthwiseConvWorkerTask : cpu_backend_threadpool::Task {
  DepthwiseConvWorkerTask(const DepthwiseParams& params,
                          const int32* output_multiplier,
                          const int32* output_shift,
                          const RuntimeShape& input_shape, const T* input_data,
                          const RuntimeShape& filter_shape,
                          const T* filter_data, const RuntimeShape& bias_shape,
                          const TS* bias_data, const RuntimeShape& output_shape,
                          T* output_data, int thread_start, int thread_end,
                          int thread_dim,
                          const CpuBackendContext& cpu_backend_context_x)
      : params_(params),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        input_shape_(input_shape),
        input_data_(input_data),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_shape_(output_shape),
        output_data_(output_data),
        thread_start_(thread_start),
        thread_end_(thread_end),
        thread_dim_(thread_dim),
        cpu_backend_context_(cpu_backend_context_x) {}
  void Run() override {
    DepthwiseConvImpl(params_, output_multiplier_, output_shift_, input_shape_,
                      input_data_, filter_shape_, filter_data_, bias_shape_,
                      bias_data_, output_shape_, output_data_, thread_start_,
                      thread_end_, thread_dim_, cpu_backend_context_);
  }
 private:
  const DepthwiseParams& params_;
  const int32* output_multiplier_;
  const int32* output_shift_;
  const RuntimeShape& input_shape_;
  const T* input_data_;
  const RuntimeShape& filter_shape_;
  const T* filter_data_;
  const RuntimeShape& bias_shape_;
  const TS* bias_data_;
  const RuntimeShape& output_shape_;
  T* output_data_;
  int thread_start_;
  int thread_end_;
  int thread_dim_;
  // Renamed from `cpu_backend_context` to follow the trailing-underscore
  // convention used by every other private member of this struct.
  const CpuBackendContext& cpu_backend_context_;
};
// Heuristic thread count for splitting the conv along output dimension
// `thread_dim`: aims for at least kMinMulPerThread multiply ops per thread.
// May return 0 or a huge value; the caller clamps to [1, max_num_threads].
inline int HowManyConvThreads(const RuntimeShape& output_shape,
                              const RuntimeShape& filter_shape,
                              int thread_dim) {
  constexpr int kMinMulPerThread = 8;
  // Multiplies needed to produce one unit (one slice) of thread_dim.
  const int muls_per_unit = FlatSizeSkipDim(output_shape, thread_dim) *
                            filter_shape.Dims(1) * filter_shape.Dims(2);
  const int units_per_thread = kMinMulPerThread / muls_per_unit + 1;
  return output_shape.Dims(thread_dim) / units_per_thread;
}
// Public entry point for per-channel int8 depthwise conv: picks a threading
// dimension (batch or output rows), partitions the work evenly, and runs it
// on the backend threadpool (or inline when one thread suffices).
inline void DepthwiseConvPerChannel(
    const DepthwiseParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("DepthwiseConvInt8");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int output_batches = output_shape.Dims(0);
  const int output_rows = output_shape.Dims(1);
  // Thread over whichever dimension admits more parallelism.
  int thread_count_batch = HowManyConvThreads(output_shape, filter_shape, 0);
  int thread_count_row = HowManyConvThreads(output_shape, filter_shape, 1);
  int thread_dim, thread_count, thread_dim_size;
  if (thread_count_batch > thread_count_row) {
    thread_dim = 0;
    thread_dim_size = output_batches;
    thread_count = thread_count_batch;
  } else {
    thread_dim = 1;
    thread_dim_size = output_rows;
    thread_count = thread_count_row;
  }
  const int max_threads = cpu_backend_context->max_num_threads();
  thread_count = std::max(1, std::min(thread_count, max_threads));
  if (thread_count == 1) {
    // Single-threaded: run the whole op inline over all output rows.
    DepthwiseConvImpl(params, output_multiplier, output_shift, input_shape,
                      input_data, filter_shape, filter_data, bias_shape,
                      bias_data, output_shape, output_data, /*thread_start=*/0,
                      /*thread_end=*/output_rows, /*thread_dim=*/1,
                      *cpu_backend_context);
  } else {
    std::vector<DepthwiseConvWorkerTask<int8, int32>> tasks;
    // TODO(b/131746020) don't create new heap allocations every time.
    // At least we make it a single heap allocation by using reserve().
    tasks.reserve(thread_count);
    int thread_start = 0;
    for (int i = 0; i < thread_count; ++i) {
      // Even partition of the remaining units across the remaining threads.
      int thread_end =
          thread_start + (thread_dim_size - thread_start) / (thread_count - i);
      tasks.emplace_back(params, output_multiplier, output_shift, input_shape,
                         input_data, filter_shape, filter_data, bias_shape,
                         bias_data, output_shape, output_data, thread_start,
                         thread_end, thread_dim, *cpu_backend_context);
      thread_start = thread_end;
    }
    cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                    cpu_backend_context);
  }
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h | C++ | apache-2.0 | 85,929 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_3X3_FILTER_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_3X3_FILTER_H_
#include <memory>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
// Two-level stringization so that macro arguments (the OFFSET_* values below)
// are macro-expanded before being turned into string literals spliced into
// the inline asm.
#define STR(s) STR_UNEXPANDED(s)
#define STR_UNEXPANDED(s) #s
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
#include <stddef.h>
// Represents the number of bytes offset from the start of the
// DepthwiseConvParams struct. This is used in the asm to load parameters.
// Keep these values in sync with the static_asserts below.
#define OFFSET_INPUT_DEPTH 0
#define OFFSET_INPUT_ROW_SIZE 8
#define OFFSET_OUTPUT_DEPTH 16
#define OFFSET_OUTPUT_ROW_SIZE 24
#define OFFSET_FILTER_ROW_SIZE 32
#define OFFSET_INPUT_OFFSET 40
#define OFFSET_OUTPUT_OFFSET 44
// NOTE(review): bytes 48..51 are skipped here; presumably the filter_offset
// field of DepthwiseConvParams occupies them -- confirm against the struct
// definition (the kernels below DCHECK that filter_offset == 0).
#define OFFSET_OUTPUT_MULTIPLIER 52
#define OFFSET_OUTPUT_ACTIVATION_MIN 56
#define OFFSET_OUTPUT_ACTIVATION_MAX 60
#define OFFSET_OUTPUT_RIGHT_SHIFT 64
#define OFFSET_INPUT_WIDTH 68
#define OFFSET_INPUT_HEIGHT 72
#define OFFSET_STRIDE_WIDTH 76
#define OFFSET_STRIDE_HEIGHT 80
#define OFFSET_OUTPUT_WIDTH 84
#define OFFSET_OUTPUT_HEIGHT 88
// Compile-time guarantees that the OFFSET_* literals used by the inline asm
// match the actual memory layout of DepthwiseConvParams. If the struct is
// ever reordered or a field resized, these asserts fail instead of the asm
// silently reading the wrong field.
static_assert(offsetof(DepthwiseConvParams, input_depth) == OFFSET_INPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_row_size) ==
                  OFFSET_INPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, output_depth) ==
                  OFFSET_OUTPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_row_size) ==
                  OFFSET_OUTPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, filter_row_size) ==
                  OFFSET_FILTER_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, input_offset) ==
                  OFFSET_INPUT_OFFSET,
              "")
;
static_assert(offsetof(DepthwiseConvParams, output_offset) ==
                  OFFSET_OUTPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, output_multiplier) ==
                  OFFSET_OUTPUT_MULTIPLIER,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_min) ==
                  OFFSET_OUTPUT_ACTIVATION_MIN,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_max) ==
                  OFFSET_OUTPUT_ACTIVATION_MAX,
              "");
static_assert(offsetof(DepthwiseConvParams, output_right_shift) ==
                  OFFSET_OUTPUT_RIGHT_SHIFT,
              "");
static_assert(offsetof(DepthwiseConvParams, input_width) == OFFSET_INPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_height) ==
                  OFFSET_INPUT_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_width) ==
                  OFFSET_STRIDE_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_height) ==
                  OFFSET_STRIDE_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, output_width) ==
                  OFFSET_OUTPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_height) ==
                  OFFSET_OUTPUT_HEIGHT,
              "");
// Per-channel quantized (int8) 3x3 depthwise convolution micro-kernel for the
// "upward" output-rounding mode, specialized on template arguments <8, 1, 1>:
// an 8-deep channel tile and, presumably, stride width/height of 1 --
// TODO(review): confirm the trailing template parameters' meaning against the
// primary DepthwiseConvWindowPerChannel template.
template <>
struct DepthwiseConvWindowPerChannel<DepthwiseConvOutputRounding::kUpward, 8, 1,
                                     1> {
 public:
  // Convolves one output window with an 8-deep 3x3 filter, requantizes with
  // per-channel multipliers/shifts, applies the activation clamp, and stores
  // int8 outputs.
  //
  // output_multiplier_ptr / output_shift_ptr: per-channel requantization
  //     parameters; the asm loads 8 int32 values from each (two 4-lane NEON
  //     vectors), so both must point at >= 8 valid entries.
  // input_ptr / filter_ptr / bias_ptr / output_ptr: raw tensor data.
  //     bias_ptr supplies 8 int32 accumulator seeds, read as two 4-lane
  //     loads from [bias_ptr] and [bias_ptr + 16].
  // input_depth / input_row_size: strides used directly as byte offsets by
  //     the post-incremented vector loads (int8 data, so bytes == elements).
  // output_window_height / output_window_width: size of the output window
  //     handled by this call; processed 2x2 at a time with dedicated 1-wide
  //     and 1-high leftover paths.
  // params_ptr: parameter struct whose fields the asm reads via the frozen
  //     OFFSET_* byte offsets; filter_offset must be 0 (DCHECKed below).
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr, int64_t input_depth,
                         int64_t input_row_size, int32 output_window_height,
                         int32 output_window_width,
                         const DepthwiseConvParams* params_ptr) {
    // The main loops advance 2 output columns / 2 output rows per iteration,
    // hence the factor of 2 in every increment.
    const int64_t input_width_increment = 2 * input_depth;
    const int64_t input_height_increment = 2 * input_row_size;
    const int64_t output_height_increment = 2 * params_ptr->output_row_size;
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
// Numeric local-label names used by the branches inside the asm block.
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
    asm volatile(
        // Performs depthwise convolutions for a window specified by
        // |output_window_height| and |output_window_width|. The inner-most loop
        // processes 2x2 outputs, and any leftovers at the end.
        //
        // Algorithm works as follows:
        //
        //   1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
        //      values.
        //   2. For 2 output heights at a time:
        //        i. For 2 output widths at a time, load inputs for a 2x1 (2
        //           height, 1 width) output window (4x3 input window).
        //           Registers v9--v20 hold input values. Mul-add with
        //           accumulators v21--v24. Then run activation, downquantize
        //           and store. Repeat for the next 2x1 output window,
        //           leveraging overlapping inputs.
        //       ii. Handle single leftover width if exists.
        //   3. Handle single leftover height if exists.
        //        i. For 2 output widths at a time, load inputs for a 1x2 (1
        //           height, 2 width) output window (3x4 input window).
        //           Registers v9--v20 hold input values. Mul-add with
        //           accumulators v21--v24. Then run activation, downquantize
        //           and store. Repeat for the next 1x2 output window,
        //           leveraging overlapping inputs.
        //       ii. Handle single leftover width if exists.
        //
        // Loads are placed as soon as the register is no longer needed and
        // interleaved with arithmetic operations to take advantage of
        // dual-issue pipelines. We also add input offsets as far from the loads
        // as possible to give loads enough cycles to fetch data from memory.
        //
        // This logic is copied and modified from the non-per-channel quantized
        // part.
        // However, the challenges are how to plan the registers allocation
        // wisely: 25 NEON registers are already reserved for inputs, filters,
        // and outputs; also, 2 registers (v30, v31) are used for output
        // min/max, while another 2 registers (v26, v29) are used for input
        // offset & output offset, so that's total 25 + 2 + 2 = 29 already.
        // But we need 4 more registers to hold the output multiplier & output
        // right shift (we only have 3).
        //
        // So here's the plan:
        // v27 (which held duplicated output multiplier previously) will hold
        // the first 4 values of the output_multiplier_ptr (we have 8 in total);
        // v30 (which held duplicated output right shift previously) will hold
        // the first 4 values of the output_shift_ptr (we have 8 in total);
        // lastly, v28 will hold the last 4 values of output_multiplier and v31
        // (previously occupied by activations) will hold the last 4 values of
        // output_shift. Then v25 will be used for output activation min while
        // output activation max will just reuse other registers, like v24.
        //
        // Set "constant" registers. These registers may be replaced with temp
        // values from time to time when there are not enough NEON registers.
        // We use x9--x15 general purpose registers as they are caller-saved
        // temporary registers (see
        // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf).  // NOLINT
        "ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr x3, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "cmp %w[output_window_height], #2\n"
        "dup v26.8h, w9\n"
        "ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v29.8h, w2\n"
        "ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w0, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "add x10, %[bias_ptr], #16\n"
        "ldr x1, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
        "dup v25.16b, w4\n"
        // Deal with output multiplier & output shift.
        "ld1 {v27.4s, v28.4s}, [%[output_multiplier_ptr]]\n"
        "ld1 {v30.4s, v31.4s}, [%[output_shift_ptr]]\n"
        // Load filters and add offsets.
        "ld1 {v0.8b}, [%[filter_ptr]], x3\n"
        "ld1 {v1.8b}, [%[filter_ptr]], x3\n"
        "sshll v0.8h, v0.8b, #0\n"
        "ld1 {v2.8b}, [%[filter_ptr]], x3\n"
        "sshll v1.8h, v1.8b, #0\n"
        "ld1 {v3.8b}, [%[filter_ptr]], x3\n"
        "sshll v2.8h, v2.8b, #0\n"
        "ld1 {v4.8b}, [%[filter_ptr]], x3\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v5.8b}, [%[filter_ptr]], x3\n"
        "sshll v4.8h, v4.8b, #0\n"
        "ld1 {v6.8b}, [%[filter_ptr]], x3\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v7.8b}, [%[filter_ptr]], x3\n"
        "sshll v6.8h, v6.8b, #0\n"
        "ld1 {v8.8b}, [%[filter_ptr]], x3\n"
        "sshll v7.8h, v7.8b, #0\n"
        "sshll v8.8h, v8.8b, #0\n"

        "blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
          // This loop processes 2x2 outputs. To avoid register exhaustion,
          // inputs for the left 2 outputs are loaded first, then the right
          // two outputs.
          "mov x11, %[input_ptr]\n"
          "mov x12, x11\n"
          "ld1 {v9.8b}, [x12], %[input_depth]\n"
          "add x13, x11, %[input_row_size]\n"
          "ld1 {v10.8b}, [x12], %[input_depth]\n"
          "add x14, x13, %[input_row_size]\n"
          "ld1 {v11.8b}, [x12], %[input_depth]\n"
          "add x15, x14, %[input_row_size]\n"
          "ld1 {v12.8b}, [x13], %[input_depth]\n"
          "mov w5, %w[output_window_width]\n"
          "ld1 {v13.8b}, [x13], %[input_depth]\n"
          "mov x6, %[output_ptr]\n"
          "ld1 {v14.8b}, [x13], %[input_depth]\n"
          "add x7, %[output_ptr], x1\n"
          "ld1 {v15.8b}, [x14], %[input_depth]\n"
          // The height 2 / width 2 loop loads an extra 2x1 outputs (2 height,
          // 1 width) in anticipation for the next iteration. Make sure
          // |output_window_width| is large enough to handle the additional
          // loads, otherwise jump to specific the appropriate label to handle
          // smaller widths.
          "cmp w5, #2\n"
          "saddw v9.8h, v26.8h, v9.8b\n"
          "ld1 {v16.8b}, [x14], %[input_depth]\n"
          "saddw v10.8h, v26.8h, v10.8b\n"
          "ld1 {v17.8b}, [x14], %[input_depth]\n"
          "saddw v11.8h, v26.8h, v11.8b\n"
          "ld1 {v18.8b}, [x15], %[input_depth]\n"
          "saddw v12.8h, v26.8h, v12.8b\n"
          "ld1 {v19.8b}, [x15], %[input_depth]\n"
          "saddw v13.8h, v26.8h, v13.8b\n"
          "ld1 {v20.8b}, [x15], %[input_depth]\n"
          "saddw v14.8h, v26.8h, v14.8b\n"
          "ld1 {v21.4s}, [%[bias_ptr]]\n"
          "saddw v15.8h, v26.8h, v15.8b\n"
          "ld1 {v22.4s}, [x10]\n"
          "saddw v16.8h, v26.8h, v16.8b\n"
          "ld1 {v23.4s}, [%[bias_ptr]]\n"
          "saddw v17.8h, v26.8h, v17.8b\n"
          "ld1 {v24.4s}, [x10]\n"
          "saddw v18.8h, v26.8h, v18.8b\n"
          "saddw v19.8h, v26.8h, v19.8b\n"
          "saddw v20.8h, v26.8h, v20.8b\n"

          "beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
          "cmp w5, #1\n"
          "beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"

          //"loop_%=:\n"
          DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
            // Mul-add left outputs.
            "smlal v21.4s, v0.4h, v9.4h\n"
            "subs w5, w5, #2\n"
            "smlal2 v22.4s, v0.8h, v9.8h\n"
            "cmp w5, #3\n"
            "smlal v23.4s, v0.4h, v12.4h\n"
            "ld1 {v9.8b}, [x12]\n"
            "smlal2 v24.4s, v0.8h, v12.8h\n"
            "smlal v21.4s, v1.4h, v10.4h\n"
            "smlal2 v22.4s, v1.8h, v10.8h\n"
            "smlal v23.4s, v1.4h, v13.4h\n"
            "smlal2 v24.4s, v1.8h, v13.8h\n"
            "smlal v21.4s, v2.4h, v11.4h\n"
            "smlal2 v22.4s, v2.8h, v11.8h\n"
            "smlal v23.4s, v2.4h, v14.4h\n"
            "smlal2 v24.4s, v2.8h, v14.8h\n"
            "smlal v21.4s, v3.4h, v12.4h\n"
            "smlal2 v22.4s, v3.8h, v12.8h\n"
            "ld1 {v12.8b}, [x13]\n"
            "smlal v23.4s, v3.4h, v15.4h\n"
            "smlal2 v24.4s, v3.8h, v15.8h\n"
            "smlal v21.4s, v4.4h, v13.4h\n"
            "smlal2 v22.4s, v4.8h, v13.8h\n"
            "smlal v23.4s, v4.4h, v16.4h\n"
            "smlal2 v24.4s, v4.8h, v16.8h\n"
            "smlal v21.4s, v5.4h, v14.4h\n"
            "smlal2 v22.4s, v5.8h, v14.8h\n"
            "smlal v23.4s, v5.4h, v17.4h\n"
            "smlal2 v24.4s, v5.8h, v17.8h\n"
            "smlal v21.4s, v6.4h, v15.4h\n"
            "smlal2 v22.4s, v6.8h, v15.8h\n"
            "ld1 {v15.8b}, [x14]\n"
            "smlal v23.4s, v6.4h, v18.4h\n"
            "smlal2 v24.4s, v6.8h, v18.8h\n"
            "ld1 {v18.8b}, [x15]\n"
            "smlal v21.4s, v7.4h, v16.4h\n"
            "smlal2 v22.4s, v7.8h, v16.8h\n"
            "smlal v23.4s, v7.4h, v19.4h\n"
            "smlal2 v24.4s, v7.8h, v19.8h\n"
            "smlal v21.4s, v8.4h, v17.4h\n"
            "smlal2 v22.4s, v8.8h, v17.8h\n"
            "smlal v23.4s, v8.4h, v20.4h\n"
            "smlal2 v24.4s, v8.8h, v20.8h\n"

            "sqrdmulh v21.4s, v21.4s, v27.4s\n"
            "sqrdmulh v22.4s, v22.4s, v28.4s\n"
            "sqrdmulh v23.4s, v23.4s, v27.4s\n"
            "sqrdmulh v24.4s, v24.4s, v28.4s\n"
            "sqrshl v21.4s, v21.4s, v30.4s\n"
            "sqrshl v22.4s, v22.4s, v31.4s\n"
            "sqrshl v23.4s, v23.4s, v30.4s\n"
            "sqrshl v24.4s, v24.4s, v31.4s\n"
            "sqxtn v21.4h, v21.4s\n"
            "sqxtn2 v21.8h, v22.4s\n"
            "sqxtn v23.4h, v23.4s\n"
            "sqxtn2 v23.8h, v24.4s\n"
            "sqadd v21.8h, v21.8h, v29.8h\n"
            "sqadd v23.8h, v23.8h, v29.8h\n"
            "sqxtn v21.8b, v21.8h\n"
            "sqxtn2 v21.16b, v23.8h\n"
            "dup v24.16b, w0\n"
            "ld1 {v22.4s}, [x10]\n"
            "smax v21.16b, v21.16b, v25.16b\n"
            "smin v21.16b, v21.16b, v24.16b\n"
            "ld1 {v24.4s}, [x10]\n"
            "saddw v9.8h, v26.8h, v9.8b\n"
            "st1 {v21.8b}, [x6], x3\n"
            "saddw v12.8h, v26.8h, v12.8b\n"
            "mov v23.d[0], v21.d[1]\n"
            "st1 {v23.8b}, [x7], x3\n"
            "saddw v15.8h, v26.8h, v15.8b\n"
            "ld1 {v21.4s}, [%[bias_ptr]]\n"
            "saddw v18.8h, v26.8h, v18.8b\n"
            "ld1 {v23.4s}, [%[bias_ptr]]\n"

            // Mul-add right outputs.
            "smlal v21.4s, v0.4h, v10.4h\n"
            "add x11, x11, %[input_width_increment]\n"
            "smlal2 v22.4s, v0.8h, v10.8h\n"
            "mov x12, x11\n"
            "smlal v23.4s, v0.4h, v13.4h\n"
            "add x13, x11, %[input_row_size]\n"
            "smlal2 v24.4s, v0.8h, v13.8h\n"
            "add x14, x13, %[input_row_size]\n"
            "smlal v21.4s, v1.4h, v11.4h\n"
            "add x15, x14, %[input_row_size]\n"
            "smlal2 v22.4s, v1.8h, v11.8h\n"
            "smlal v23.4s, v1.4h, v14.4h\n"
            "smlal2 v24.4s, v1.8h, v14.8h\n"
            "smlal v21.4s, v2.4h, v9.4h\n"
            "smlal2 v22.4s, v2.8h, v9.8h\n"
            "ld1 {v9.8b}, [x12], %[input_depth]\n"
            "smlal v23.4s, v2.4h, v12.4h\n"
            "ld1 {v10.8b}, [x12], %[input_depth]\n"
            "smlal2 v24.4s, v2.8h, v12.8h\n"
            "ld1 {v11.8b}, [x12], %[input_depth]\n"
            "smlal v21.4s, v3.4h, v13.4h\n"
            "smlal2 v22.4s, v3.8h, v13.8h\n"
            "smlal v23.4s, v3.4h, v16.4h\n"
            "smlal2 v24.4s, v3.8h, v16.8h\n"
            "smlal v21.4s, v4.4h, v14.4h\n"
            "smlal2 v22.4s, v4.8h, v14.8h\n"
            "smlal v23.4s, v4.4h, v17.4h\n"
            "smlal2 v24.4s, v4.8h, v17.8h\n"
            "smlal v21.4s, v5.4h, v12.4h\n"
            "smlal2 v22.4s, v5.8h, v12.8h\n"
            "ld1 {v12.8b}, [x13], %[input_depth]\n"
            "smlal v23.4s, v5.4h, v15.4h\n"
            "ld1 {v13.8b}, [x13], %[input_depth]\n"
            "smlal2 v24.4s, v5.8h, v15.8h\n"
            "ld1 {v14.8b}, [x13], %[input_depth]\n"
            "smlal v21.4s, v6.4h, v16.4h\n"
            "smlal2 v22.4s, v6.8h, v16.8h\n"
            "smlal v23.4s, v6.4h, v19.4h\n"
            "smlal2 v24.4s, v6.8h, v19.8h\n"
            "smlal v21.4s, v7.4h, v17.4h\n"
            "smlal2 v22.4s, v7.8h, v17.8h\n"
            "smlal v23.4s, v7.4h, v20.4h\n"
            "smlal2 v24.4s, v7.8h, v20.8h\n"
            "smlal v21.4s, v8.4h, v15.4h\n"
            "smlal2 v22.4s, v8.8h, v15.8h\n"
            "ld1 {v15.8b}, [x14], %[input_depth]\n"
            "smlal v23.4s, v8.4h, v18.4h\n"
            "ld1 {v16.8b}, [x14], %[input_depth]\n"
            "smlal2 v24.4s, v8.8h, v18.8h\n"
            "ld1 {v17.8b}, [x14], %[input_depth]\n"
            "sqrdmulh v21.4s, v21.4s, v27.4s\n"
            "ld1 {v18.8b}, [x15], %[input_depth]\n"
            "sqrdmulh v22.4s, v22.4s, v28.4s\n"
            "ld1 {v19.8b}, [x15], %[input_depth]\n"
            "sqrdmulh v23.4s, v23.4s, v27.4s\n"
            "ld1 {v20.8b}, [x15], %[input_depth]\n"
            "sqrdmulh v24.4s, v24.4s, v28.4s\n"
            "sqrshl v21.4s, v21.4s, v30.4s\n"
            "sqrshl v22.4s, v22.4s, v31.4s\n"
            "sqrshl v23.4s, v23.4s, v30.4s\n"
            "sqrshl v24.4s, v24.4s, v31.4s\n"
            "sqxtn v21.4h, v21.4s\n"
            "sqxtn2 v21.8h, v22.4s\n"
            "sqxtn v23.4h, v23.4s\n"
            "sqxtn2 v23.8h, v24.4s\n"
            "sqadd v21.8h, v21.8h, v29.8h\n"
            "sqadd v23.8h, v23.8h, v29.8h\n"
            "sqxtn v21.8b, v21.8h\n"
            "sqxtn2 v21.16b, v23.8h\n"
            "dup v24.16b, w0\n"
            "ld1 {v22.4s}, [x10]\n"
            "smax v21.16b, v21.16b, v25.16b\n"
            "smin v21.16b, v21.16b, v24.16b\n"
            "ld1 {v24.4s}, [x10]\n"
            "saddw v9.8h, v26.8h, v9.8b\n"
            "st1 {v21.8b}, [x6], x3\n"
            "saddw v10.8h, v26.8h, v10.8b\n"
            "mov v23.d[0], v21.d[1]\n"
            "st1 {v23.8b}, [x7], x3\n"
            "saddw v11.8h, v26.8h, v11.8b\n"
            "saddw v12.8h, v26.8h, v12.8b\n"
            "saddw v13.8h, v26.8h, v13.8b\n"
            "saddw v14.8h, v26.8h, v14.8b\n"
            "saddw v15.8h, v26.8h, v15.8b\n"
            "ld1 {v21.4s}, [%[bias_ptr]]\n"
            "saddw v16.8h, v26.8h, v16.8b\n"
            "ld1 {v23.4s}, [%[bias_ptr]]\n"
            "saddw v17.8h, v26.8h, v17.8b\n"
            "saddw v18.8h, v26.8h, v18.8b\n"
            "saddw v19.8h, v26.8h, v19.8b\n"
            "saddw v20.8h, v26.8h, v20.8b\n"

            "bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"

          // At this point, there will be one of 2 width or 1 width leftover,
          // not both.
          "cmp w5, #2\n"
          "blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"

          // Handle last 2 columns if exists.
          DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
          // Mul-add left outputs.
          "smlal v21.4s, v0.4h, v9.4h\n"
          "smlal2 v22.4s, v0.8h, v9.8h\n"
          "smlal v23.4s, v0.4h, v12.4h\n"
          "ld1 {v9.8b}, [x12]\n"
          "smlal2 v24.4s, v0.8h, v12.8h\n"
          "smlal v21.4s, v1.4h, v10.4h\n"
          "smlal2 v22.4s, v1.8h, v10.8h\n"
          "smlal v23.4s, v1.4h, v13.4h\n"
          "smlal2 v24.4s, v1.8h, v13.8h\n"
          "smlal v21.4s, v2.4h, v11.4h\n"
          "smlal2 v22.4s, v2.8h, v11.8h\n"
          "smlal v23.4s, v2.4h, v14.4h\n"
          "smlal2 v24.4s, v2.8h, v14.8h\n"
          "smlal v21.4s, v3.4h, v12.4h\n"
          "smlal2 v22.4s, v3.8h, v12.8h\n"
          "ld1 {v12.8b}, [x13]\n"
          "smlal v23.4s, v3.4h, v15.4h\n"
          "smlal2 v24.4s, v3.8h, v15.8h\n"
          "smlal v21.4s, v4.4h, v13.4h\n"
          "smlal2 v22.4s, v4.8h, v13.8h\n"
          "smlal v23.4s, v4.4h, v16.4h\n"
          "smlal2 v24.4s, v4.8h, v16.8h\n"
          "smlal v21.4s, v5.4h, v14.4h\n"
          "smlal2 v22.4s, v5.8h, v14.8h\n"
          "smlal v23.4s, v5.4h, v17.4h\n"
          "smlal2 v24.4s, v5.8h, v17.8h\n"
          "smlal v21.4s, v6.4h, v15.4h\n"
          "smlal2 v22.4s, v6.8h, v15.8h\n"
          "ld1 {v15.8b}, [x14]\n"
          "smlal v23.4s, v6.4h, v18.4h\n"
          "smlal2 v24.4s, v6.8h, v18.8h\n"
          "ld1 {v18.8b}, [x15]\n"
          "smlal v21.4s, v7.4h, v16.4h\n"
          "smlal2 v22.4s, v7.8h, v16.8h\n"
          "smlal v23.4s, v7.4h, v19.4h\n"
          "smlal2 v24.4s, v7.8h, v19.8h\n"
          "smlal v21.4s, v8.4h, v17.4h\n"
          "smlal2 v22.4s, v8.8h, v17.8h\n"
          "smlal v23.4s, v8.4h, v20.4h\n"
          "smlal2 v24.4s, v8.8h, v20.8h\n"

          "sqrdmulh v21.4s, v21.4s, v27.4s\n"
          "sqrdmulh v22.4s, v22.4s, v28.4s\n"
          "sqrdmulh v23.4s, v23.4s, v27.4s\n"
          "sqrdmulh v24.4s, v24.4s, v28.4s\n"
          "sqrshl v21.4s, v21.4s, v30.4s\n"
          "sqrshl v22.4s, v22.4s, v31.4s\n"
          "sqrshl v23.4s, v23.4s, v30.4s\n"
          "sqrshl v24.4s, v24.4s, v31.4s\n"
          "sqxtn v21.4h, v21.4s\n"
          "sqxtn2 v21.8h, v22.4s\n"
          "sqxtn v23.4h, v23.4s\n"
          "sqxtn2 v23.8h, v24.4s\n"
          "sqadd v21.8h, v21.8h, v29.8h\n"
          "sqadd v23.8h, v23.8h, v29.8h\n"
          "sqxtn v21.8b, v21.8h\n"
          "sqxtn2 v21.16b, v23.8h\n"
          "dup v24.16b, w0\n"
          "ld1 {v22.4s}, [x10]\n"
          "smax v21.16b, v21.16b, v25.16b\n"
          "smin v21.16b, v21.16b, v24.16b\n"
          "ld1 {v24.4s}, [x10]\n"
          "saddw v9.8h, v26.8h, v9.8b\n"
          "st1 {v21.8b}, [x6], x3\n"
          "mov v23.d[0], v21.d[1]\n"
          "saddw v12.8h, v26.8h, v12.8b\n"
          "st1 {v23.8b}, [x7], x3\n"
          "saddw v15.8h, v26.8h, v15.8b\n"
          "ld1 {v21.4s}, [%[bias_ptr]]\n"
          "saddw v18.8h, v26.8h, v18.8b\n"
          "ld1 {v23.4s}, [%[bias_ptr]]\n"

          // Mul-add right outputs.
          "smlal v21.4s, v0.4h, v10.4h\n"
          "smlal2 v22.4s, v0.8h, v10.8h\n"
          "smlal v23.4s, v0.4h, v13.4h\n"
          "smlal2 v24.4s, v0.8h, v13.8h\n"
          "smlal v21.4s, v1.4h, v11.4h\n"
          "smlal2 v22.4s, v1.8h, v11.8h\n"
          "smlal v23.4s, v1.4h, v14.4h\n"
          "smlal2 v24.4s, v1.8h, v14.8h\n"
          "smlal v21.4s, v2.4h, v9.4h\n"
          "smlal2 v22.4s, v2.8h, v9.8h\n"
          "smlal v23.4s, v2.4h, v12.4h\n"
          "smlal2 v24.4s, v2.8h, v12.8h\n"
          "smlal v21.4s, v3.4h, v13.4h\n"
          "smlal2 v22.4s, v3.8h, v13.8h\n"
          "smlal v23.4s, v3.4h, v16.4h\n"
          "smlal2 v24.4s, v3.8h, v16.8h\n"
          "smlal v21.4s, v4.4h, v14.4h\n"
          "smlal2 v22.4s, v4.8h, v14.8h\n"
          "smlal v23.4s, v4.4h, v17.4h\n"
          "smlal2 v24.4s, v4.8h, v17.8h\n"
          "smlal v21.4s, v5.4h, v12.4h\n"
          "smlal2 v22.4s, v5.8h, v12.8h\n"
          "smlal v23.4s, v5.4h, v15.4h\n"
          "smlal2 v24.4s, v5.8h, v15.8h\n"
          "smlal v21.4s, v6.4h, v16.4h\n"
          "smlal2 v22.4s, v6.8h, v16.8h\n"
          "smlal v23.4s, v6.4h, v19.4h\n"
          "smlal2 v24.4s, v6.8h, v19.8h\n"
          "smlal v21.4s, v7.4h, v17.4h\n"
          "smlal2 v22.4s, v7.8h, v17.8h\n"
          "smlal v23.4s, v7.4h, v20.4h\n"
          "smlal2 v24.4s, v7.8h, v20.8h\n"
          "smlal v21.4s, v8.4h, v15.4h\n"
          "smlal2 v22.4s, v8.8h, v15.8h\n"
          "smlal v23.4s, v8.4h, v18.4h\n"
          "smlal2 v24.4s, v8.8h, v18.8h\n"

          "sqrdmulh v21.4s, v21.4s, v27.4s\n"
          "sqrdmulh v22.4s, v22.4s, v28.4s\n"
          "sqrdmulh v23.4s, v23.4s, v27.4s\n"
          "sqrdmulh v24.4s, v24.4s, v28.4s\n"
          "sqrshl v21.4s, v21.4s, v30.4s\n"
          "sqrshl v22.4s, v22.4s, v31.4s\n"
          "sqrshl v23.4s, v23.4s, v30.4s\n"
          "sqrshl v24.4s, v24.4s, v31.4s\n"
          "sqxtn v21.4h, v21.4s\n"
          "sqxtn2 v21.8h, v22.4s\n"
          "sqxtn v23.4h, v23.4s\n"
          "sqxtn2 v23.8h, v24.4s\n"
          "sqadd v21.8h, v21.8h, v29.8h\n"
          "sqadd v23.8h, v23.8h, v29.8h\n"
          "sqxtn v21.8b, v21.8h\n"
          "sqxtn2 v21.16b, v23.8h\n"
          "dup v24.16b, w0\n"
          "smax v21.16b, v21.16b, v25.16b\n"
          "smin v21.16b, v21.16b, v24.16b\n"
          "st1 {v21.8b}, [x6], x3\n"
          "mov v23.d[0], v21.d[1]\n"
          "st1 {v23.8b}, [x7], x3\n"
          "b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"

          DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
          "smlal v21.4s, v0.4h, v9.4h\n"
          "smlal2 v22.4s, v0.8h, v9.8h\n"
          "smlal v23.4s, v0.4h, v12.4h\n"
          "smlal2 v24.4s, v0.8h, v12.8h\n"
          "smlal v21.4s, v1.4h, v10.4h\n"
          "smlal2 v22.4s, v1.8h, v10.8h\n"
          "smlal v23.4s, v1.4h, v13.4h\n"
          "smlal2 v24.4s, v1.8h, v13.8h\n"
          "smlal v21.4s, v2.4h, v11.4h\n"
          "smlal2 v22.4s, v2.8h, v11.8h\n"
          "smlal v23.4s, v2.4h, v14.4h\n"
          "smlal2 v24.4s, v2.8h, v14.8h\n"
          "smlal v21.4s, v3.4h, v12.4h\n"
          "smlal2 v22.4s, v3.8h, v12.8h\n"
          "smlal v23.4s, v3.4h, v15.4h\n"
          "smlal2 v24.4s, v3.8h, v15.8h\n"
          "smlal v21.4s, v4.4h, v13.4h\n"
          "smlal2 v22.4s, v4.8h, v13.8h\n"
          "smlal v23.4s, v4.4h, v16.4h\n"
          "smlal2 v24.4s, v4.8h, v16.8h\n"
          "smlal v21.4s, v5.4h, v14.4h\n"
          "smlal2 v22.4s, v5.8h, v14.8h\n"
          "smlal v23.4s, v5.4h, v17.4h\n"
          "smlal2 v24.4s, v5.8h, v17.8h\n"
          "smlal v21.4s, v6.4h, v15.4h\n"
          "smlal2 v22.4s, v6.8h, v15.8h\n"
          "smlal v23.4s, v6.4h, v18.4h\n"
          "smlal2 v24.4s, v6.8h, v18.8h\n"
          "smlal v21.4s, v7.4h, v16.4h\n"
          "smlal2 v22.4s, v7.8h, v16.8h\n"
          "smlal v23.4s, v7.4h, v19.4h\n"
          "smlal2 v24.4s, v7.8h, v19.8h\n"
          "smlal v21.4s, v8.4h, v17.4h\n"
          "smlal2 v22.4s, v8.8h, v17.8h\n"
          "smlal v23.4s, v8.4h, v20.4h\n"
          "smlal2 v24.4s, v8.8h, v20.8h\n"

          "sqrdmulh v21.4s, v21.4s, v27.4s\n"
          "sqrdmulh v22.4s, v22.4s, v28.4s\n"
          "sqrdmulh v23.4s, v23.4s, v27.4s\n"
          "sqrdmulh v24.4s, v24.4s, v28.4s\n"
          "sqrshl v21.4s, v21.4s, v30.4s\n"
          "sqrshl v22.4s, v22.4s, v31.4s\n"
          "sqrshl v23.4s, v23.4s, v30.4s\n"
          "sqrshl v24.4s, v24.4s, v31.4s\n"
          "sqxtn v21.4h, v21.4s\n"
          "sqxtn2 v21.8h, v22.4s\n"
          "sqxtn v23.4h, v23.4s\n"
          "sqxtn2 v23.8h, v24.4s\n"
          "sqadd v21.8h, v21.8h, v29.8h\n"
          "sqadd v23.8h, v23.8h, v29.8h\n"
          "sqxtn v21.8b, v21.8h\n"
          "sqxtn2 v21.16b, v23.8h\n"
          "dup v24.16b, w0\n"
          "smax v21.16b, v21.16b, v25.16b\n"
          "smin v21.16b, v21.16b, v24.16b\n"
          "st1 {v21.8b}, [x6], x3\n"
          "mov v23.d[0], v21.d[1]\n"
          "st1 {v23.8b}, [x7], x3\n"

          DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
          "subs %w[output_window_height], %w[output_window_height], #2\n"
          "add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
          "cmp %w[output_window_height], #2\n"
          "add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
          "bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"

        DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
        "cmp %w[output_window_height], #1\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"

        DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
        "mov x12, %[input_ptr]\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "add x13, %[input_ptr], %[input_row_size]\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        "add x14, x13, %[input_row_size]\n"
        "ld1 {v11.8b}, [x12], %[input_depth]\n"
        "add x15, x14, %[input_row_size]\n"
        "mov w5, %w[output_window_width]\n"
        "ld1 {v13.8b}, [x13], %[input_depth]\n"
        "mov x6, %[output_ptr]\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "add x7, %[output_ptr], x1\n"
        "ld1 {v15.8b}, [x13], %[input_depth]\n"
        // The height 1 / width 2 loop loads an extra 1x1 output in anticipation
        // for the next iteration. Make sure |output_window_width| is large
        // enough to handle the additional load, otherwise jump to the
        // appropriate label to handle smaller widths.
        "cmp w5, #2\n"
        "ld1 {v17.8b}, [x14], %[input_depth]\n"
        "ld1 {v18.8b}, [x14], %[input_depth]\n"
        "ld1 {v19.8b}, [x14], %[input_depth]\n"
        "ld1 {v21.4s}, [%[bias_ptr]]\n"
        "ld1 {v22.4s}, [x10]\n"
        "ld1 {v23.4s}, [%[bias_ptr]]\n"
        "ld1 {v24.4s}, [x10]\n"

        "saddw v9.8h, v26.8h, v9.8b\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "saddw v14.8h, v26.8h, v14.8b\n"
        "saddw v15.8h, v26.8h, v15.8b\n"
        "saddw v17.8h, v26.8h, v17.8b\n"
        "saddw v18.8h, v26.8h, v18.8b\n"
        "saddw v19.8h, v26.8h, v19.8b\n"

        "beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
        "cmp w5, #1\n"
        "beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
          // Load inputs for 3x4 input window which corresponds to a 1x2 output
          // window.
          "smlal v21.4s, v0.4h, v9.4h\n"
          "ld1 {v12.8b}, [x12]\n"
          "smlal2 v22.4s, v0.8h, v9.8h\n"
          "ld1 {v16.8b}, [x13]\n"
          "smlal v23.4s, v0.4h, v10.4h\n"
          "ld1 {v20.8b}, [x14]\n"
          "smlal2 v24.4s, v0.8h, v10.8h\n"
          "subs w5, w5, #2\n"
          "smlal v21.4s, v1.4h, v10.4h\n"
          "cmp w5, #3\n"
          "smlal2 v22.4s, v1.8h, v10.8h\n"
          "add %[input_ptr], %[input_ptr], %[input_width_increment]\n"
          "smlal v23.4s, v1.4h, v11.4h\n"
          "mov x12, %[input_ptr]\n"
          "smlal2 v24.4s, v1.8h, v11.8h\n"
          "ld1 {v9.8b}, [x12], %[input_depth]\n"
          "smlal v21.4s, v2.4h, v11.4h\n"
          "ld1 {v10.8b}, [x12], %[input_depth]\n"
          "saddw v12.8h, v26.8h, v12.8b\n"
          "smlal2 v22.4s, v2.8h, v11.8h\n"
          "ld1 {v11.8b}, [x12], %[input_depth]\n"
          "add x13, %[input_ptr], %[input_row_size]\n"
          "smlal v23.4s, v2.4h, v12.4h\n"
          "add x14, x13, %[input_row_size]\n"
          "smlal2 v24.4s, v2.8h, v12.8h\n"
          "smlal v21.4s, v3.4h, v13.4h\n"
          "add x15, x14, %[input_row_size]\n"
          "smlal2 v22.4s, v3.8h, v13.8h\n"
          "ld1 {v13.8b}, [x13], %[input_depth]\n"
          "smlal v23.4s, v3.4h, v14.4h\n"
          "smlal2 v24.4s, v3.8h, v14.8h\n"
          "smlal v21.4s, v4.4h, v14.4h\n"
          "smlal2 v22.4s, v4.8h, v14.8h\n"
          "ld1 {v14.8b}, [x13], %[input_depth]\n"
          "smlal v23.4s, v4.4h, v15.4h\n"
          "smlal2 v24.4s, v4.8h, v15.8h\n"
          "smlal v21.4s, v5.4h, v15.4h\n"
          "saddw v16.8h, v26.8h, v16.8b\n"
          "smlal2 v22.4s, v5.8h, v15.8h\n"
          "ld1 {v15.8b}, [x13], %[input_depth]\n"
          "smlal v23.4s, v5.4h, v16.4h\n"
          "smlal2 v24.4s, v5.8h, v16.8h\n"
          "smlal v21.4s, v6.4h, v17.4h\n"
          "smlal2 v22.4s, v6.8h, v17.8h\n"
          "ld1 {v17.8b}, [x14], %[input_depth]\n"
          "smlal v23.4s, v6.4h, v18.4h\n"
          "smlal2 v24.4s, v6.8h, v18.8h\n"
          "smlal v21.4s, v7.4h, v18.4h\n"
          "smlal2 v22.4s, v7.8h, v18.8h\n"
          "ld1 {v18.8b}, [x14], %[input_depth]\n"
          "smlal v23.4s, v7.4h, v19.4h\n"
          "smlal2 v24.4s, v7.8h, v19.8h\n"
          "smlal v21.4s, v8.4h, v19.4h\n"
          "saddw v20.8h, v26.8h, v20.8b\n"
          "smlal2 v22.4s, v8.8h, v19.8h\n"
          "ld1 {v19.8b}, [x14], %[input_depth]\n"
          "smlal v23.4s, v8.4h, v20.4h\n"
          "smlal2 v24.4s, v8.8h, v20.8h\n"

          "sqrdmulh v21.4s, v21.4s, v27.4s\n"
          "sqrdmulh v22.4s, v22.4s, v28.4s\n"
          "sqrdmulh v23.4s, v23.4s, v27.4s\n"
          "sqrdmulh v24.4s, v24.4s, v28.4s\n"
          "sqrshl v21.4s, v21.4s, v30.4s\n"
          "sqrshl v22.4s, v22.4s, v31.4s\n"
          "sqrshl v23.4s, v23.4s, v30.4s\n"
          "sqrshl v24.4s, v24.4s, v31.4s\n"
          "sqxtn v21.4h, v21.4s\n"
          "sqxtn2 v21.8h, v22.4s\n"
          "sqxtn v23.4h, v23.4s\n"
          "sqxtn2 v23.8h, v24.4s\n"
          "sqadd v21.8h, v21.8h, v29.8h\n"
          "sqadd v23.8h, v23.8h, v29.8h\n"
          "sqxtn v21.8b, v21.8h\n"
          "sqxtn2 v21.16b, v23.8h\n"
          "dup v24.16b, w0\n"
          "ld1 {v22.4s}, [x10]\n"
          "smax v21.16b, v21.16b, v25.16b\n"
          "smin v21.16b, v21.16b, v24.16b\n"
          "ld1 {v24.4s}, [x10]\n"
          "saddw v9.8h, v26.8h, v9.8b\n"
          "st1 {v21.8b}, [%[output_ptr]], x3\n"
          "saddw v10.8h, v26.8h, v10.8b\n"
          "mov v23.d[0], v21.d[1]\n"
          "st1 {v23.8b}, [%[output_ptr]], x3\n"
          "saddw v11.8h, v26.8h, v11.8b\n"
          "saddw v12.8h, v26.8h, v12.8b\n"
          "saddw v13.8h, v26.8h, v13.8b\n"
          "saddw v14.8h, v26.8h, v14.8b\n"
          "saddw v15.8h, v26.8h, v15.8b\n"
          "ld1 {v21.4s}, [%[bias_ptr]]\n"
          "saddw v16.8h, v26.8h, v16.8b\n"
          "ld1 {v23.4s}, [%[bias_ptr]]\n"
          "saddw v17.8h, v26.8h, v17.8b\n"
          "saddw v18.8h, v26.8h, v18.8b\n"
          "saddw v19.8h, v26.8h, v19.8b\n"
          "saddw v20.8h, v26.8h, v20.8b\n"

          "bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"

        // At this point, there will be one of 2 width or 1 width leftover,
        // not both.
        "cmp w5, #2\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"

        // Handle last two horizontal outputs if exists.
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
        "smlal v21.4s, v0.4h, v9.4h\n"
        "ld1 {v12.8b}, [x12], %[input_depth]\n"
        "smlal2 v22.4s, v0.8h, v9.8h\n"
        "ld1 {v16.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v0.4h, v10.4h\n"
        "ld1 {v20.8b}, [x14], %[input_depth]\n"
        "smlal2 v24.4s, v0.8h, v10.8h\n"
        "smlal v21.4s, v1.4h, v10.4h\n"
        "smlal2 v22.4s, v1.8h, v10.8h\n"
        "smlal v23.4s, v1.4h, v11.4h\n"
        "smlal2 v24.4s, v1.8h, v11.8h\n"
        "smlal v21.4s, v2.4h, v11.4h\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "smlal2 v22.4s, v2.8h, v11.8h\n"
        "smlal v23.4s, v2.4h, v12.4h\n"
        "smlal2 v24.4s, v2.8h, v12.8h\n"
        "smlal v21.4s, v3.4h, v13.4h\n"
        "smlal2 v22.4s, v3.8h, v13.8h\n"
        "smlal v23.4s, v3.4h, v14.4h\n"
        "smlal2 v24.4s, v3.8h, v14.8h\n"
        "smlal v21.4s, v4.4h, v14.4h\n"
        "smlal2 v22.4s, v4.8h, v14.8h\n"
        "smlal v23.4s, v4.4h, v15.4h\n"
        "smlal2 v24.4s, v4.8h, v15.8h\n"
        "smlal v21.4s, v5.4h, v15.4h\n"
        "saddw v16.8h, v26.8h, v16.8b\n"
        "smlal2 v22.4s, v5.8h, v15.8h\n"
        "smlal v23.4s, v5.4h, v16.4h\n"
        "smlal2 v24.4s, v5.8h, v16.8h\n"
        "smlal v21.4s, v6.4h, v17.4h\n"
        "smlal2 v22.4s, v6.8h, v17.8h\n"
        "smlal v23.4s, v6.4h, v18.4h\n"
        "smlal2 v24.4s, v6.8h, v18.8h\n"
        "smlal v21.4s, v7.4h, v18.4h\n"
        "smlal2 v22.4s, v7.8h, v18.8h\n"
        "smlal v23.4s, v7.4h, v19.4h\n"
        "smlal2 v24.4s, v7.8h, v19.8h\n"
        "smlal v21.4s, v8.4h, v19.4h\n"
        "saddw v20.8h, v26.8h, v20.8b\n"
        "smlal2 v22.4s, v8.8h, v19.8h\n"
        "smlal v23.4s, v8.4h, v20.4h\n"
        "smlal2 v24.4s, v8.8h, v20.8h\n"

        "sqrdmulh v21.4s, v21.4s, v27.4s\n"
        "sqrdmulh v22.4s, v22.4s, v28.4s\n"
        "sqrdmulh v23.4s, v23.4s, v27.4s\n"
        "sqrdmulh v24.4s, v24.4s, v28.4s\n"
        "sqrshl v21.4s, v21.4s, v30.4s\n"
        "sqrshl v22.4s, v22.4s, v31.4s\n"
        "sqrshl v23.4s, v23.4s, v30.4s\n"
        "sqrshl v24.4s, v24.4s, v31.4s\n"
        "sqxtn v21.4h, v21.4s\n"
        "sqxtn2 v21.8h, v22.4s\n"
        "sqxtn v23.4h, v23.4s\n"
        "sqxtn2 v23.8h, v24.4s\n"
        "sqadd v21.8h, v21.8h, v29.8h\n"
        "sqadd v23.8h, v23.8h, v29.8h\n"
        "sqxtn v21.8b, v21.8h\n"
        "sqxtn2 v21.16b, v23.8h\n"
        "dup v24.16b, w0\n"
        "smax v21.16b, v21.16b, v25.16b\n"
        "smin v21.16b, v21.16b, v24.16b\n"
        "st1 {v21.8b}, [%[output_ptr]], x3\n"
        "mov v23.d[0], v21.d[1]\n"
        "st1 {v23.8b}, [%[output_ptr]], x3\n"
        "b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"

        // Handle bottom right output if exists.
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
        "smlal v21.4s, v0.4h, v9.4h\n"
        "smlal2 v22.4s, v0.8h, v9.8h\n"
        "smlal v21.4s, v1.4h, v10.4h\n"
        "smlal2 v22.4s, v1.8h, v10.8h\n"
        "smlal v21.4s, v2.4h, v11.4h\n"
        "smlal2 v22.4s, v2.8h, v11.8h\n"
        "smlal v21.4s, v3.4h, v13.4h\n"
        "smlal2 v22.4s, v3.8h, v13.8h\n"
        "smlal v21.4s, v4.4h, v14.4h\n"
        "smlal2 v22.4s, v4.8h, v14.8h\n"
        "smlal v21.4s, v5.4h, v15.4h\n"
        "smlal2 v22.4s, v5.8h, v15.8h\n"
        "smlal v21.4s, v6.4h, v17.4h\n"
        "smlal2 v22.4s, v6.8h, v17.8h\n"
        "smlal v21.4s, v7.4h, v18.4h\n"
        "smlal2 v22.4s, v7.8h, v18.8h\n"
        "smlal v21.4s, v8.4h, v19.4h\n"
        "smlal2 v22.4s, v8.8h, v19.8h\n"

        "sqrdmulh v21.4s, v21.4s, v27.4s\n"
        "sqrdmulh v22.4s, v22.4s, v28.4s\n"
        "sqrshl v21.4s, v21.4s, v30.4s\n"
        "sqrshl v22.4s, v22.4s, v31.4s\n"
        "sqxtn v21.4h, v21.4s\n"
        "sqxtn2 v21.8h, v22.4s\n"
        "sqadd v21.8h, v21.8h, v29.8h\n"
        "sqxtn v21.8b, v21.8h\n"
        "dup v24.16b, w0\n"
        "smax v21.8b, v21.8b, v25.8b\n"
        "smin v21.8b, v21.8b, v24.8b\n"
        "st1 {v21.8b}, [%[output_ptr]]\n"

        DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
    :
    // Outputs.
    [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
    [output_ptr] "+r"(output_ptr),
    [output_window_height] "+r"(output_window_height)
    :
    // Inputs.
    [output_multiplier_ptr] "r"(output_multiplier_ptr),
    [output_shift_ptr] "r"(output_shift_ptr),
    [bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
    [input_depth] "r"(input_depth),
    [output_window_width] "r"(output_window_width),
    [input_width_increment] "r"(input_width_increment),
    [input_height_increment] "r"(input_height_increment),
    [output_height_increment] "r"(output_height_increment),
    [params_ptr] "r"(params_ptr)
    :
    // Clobbers.
    "cc", "memory",
    // We use these NEON registers.
    "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
    "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
    "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
    "v30", "v31",
    // We use these general-purpose registers.
    "x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
    "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
  }
};
// Per-channel quantized depthwise-convolution micro-kernel specialization:
// 3x3 filter, 8 depth channels per pass, stride 2 in both height and width,
// with DepthwiseConvOutputRounding::kUpward rounding realized through the
// SQRDMULH (rounding doubling multiply) + SQRSHL (rounding shift) sequence.
// Implemented as hand-scheduled AArch64 NEON inline assembly; instruction
// order and register assignments are load-bearing (dual-issue scheduling),
// so do not reorder statements inside the asm block.
template <>
struct DepthwiseConvWindowPerChannel<DepthwiseConvOutputRounding::kUpward, 8, 2,
                                     2> {
  // Runs the depthwise convolution over an output window of
  // |output_window_height| x |output_window_width| for 8 depth channels.
  //
  //  output_multiplier_ptr / output_shift_ptr: per-channel quantized output
  //      multiplier and shift vectors, consumed 8 lanes at a time by the
  //      SQRDMULH / SQRSHL requantization sequence in the asm.
  //  input_ptr / filter_ptr / bias_ptr / output_ptr: int8 input and filter,
  //      int32 bias, int8 output. The filter offset must be zero (see the
  //      DCHECK below) — per-channel int8 filters are symmetric.
  //  input_depth / input_row_size: element strides used to step across
  //      depth and input rows.
  //  params_ptr: remaining scalar parameters (input/output offsets,
  //      activation min/max, output depth and row size), read inside the
  //      asm via the OFFSET_* field-offset constants.
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr, int64_t input_depth,
                         int64_t input_row_size, int32 output_window_height,
                         int32 output_window_width,
                         const DepthwiseConvParams* params_ptr) {
    // With stride 2 and 2 outputs computed per loop iteration, each
    // iteration consumes 4 input columns (and each height-2 pass consumes
    // 4 input rows, producing 2 output rows).
    const int64_t input_width_increment = 4 * input_depth;
    const int64_t input_height_increment = 4 * input_row_size;
    const int64_t output_height_increment = 2 * params_ptr->output_row_size;
    // The asm below never applies a filter offset, so require it to be 0.
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);

// Numeric local labels used by the assembly ("Nf"/"Nb" jumps); they are
// #undef'ed immediately after the asm block.
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
    asm volatile(
        // Performs depthwise convolutions for a window specified by
        // |output_window_height| and |output_window_width|. The inner-most loop
        // processes 2x2 outputs, and any leftovers at the end.
        //
        // Algorithm works as follows:
        //
        //   1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
        //      values.
        //   2. For 2 output heights at a time:
        //        i.  For 2 output widths at a time at stride 2, a 5x5 input
        //            window is required. To avoid register exhaustion, we load
        //            the first 2 rows of the 5x5 input window into registers
        //            v9--v18, and use the same registers to load the next 2
        //            rows, and finally v9--v13 to load the last row.
        //            Accumulators for all 2x2 outputs are reserved by registers
        //            v21-v22 (top left output), v23-v24 (top right output),
        //            v19-v20 (bottom left output), v25-v26 (bottom right
        //            output).
        //        ii. Handle single leftover width if exists.
        //   3. Handle single leftover height if exists.
        //        i.  For 2 output widths at a time at stride 2, load inputs for
        //            a 1x2 (1 height, 2 width) output window (3x5 input
        //            window). Registers v9--v24 hold input values. Mul-add with
        //            accumulators v24--v27.
        //        ii. Handle single leftover width if exists.
        //
        // Loads are placed as soon as the register is no longer needed and
        // interleaved with arithmetic operations to take advantage of
        // dual-issue pipelines. We also add input offsets as far from the loads
        // as possible to give loads enough cycles to fetch data from memory.
        //
        // This logic is copied and modified from the non-per-channel quantized
        // part.
        // The register planning here is really tricky:
        // v0-v29 are all used at least once for either filter/input/output,
        // some of them are used for output shift and output multiplier, or
        // input/output offset.
        // Only v30 & v31 are only used for output activation min/max.
        // For per-channel case, we need 4 registers to hold output shift &
        // output multiplier. However, given the reality, we simply cannot do
        // that without reloading.
        //
        // So here's the plan:
        // We hold output_multiplier in v30 & v31, and we will load output_shift
        // into two consecutive registers each time before use.
        // We will duplicate output min & max before needed.
        // Sometimes we may borrow registers from input offset or bias, we will
        // dup them back after use.
        //
        // Set "constant" registers. These registers may be replaced with temp
        // values from time to time when there are not enough NEON registers.
        // We use x9--x15 general purpose registers as they are caller-saved
        // temporary registers (see http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
        "ldr w0, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "cmp %w[output_window_height], #2\n"
        "dup v28.8h, w0\n"
        "ldr w2, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "ldr w3, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "dup v29.8h, w2\n"
        "ldr w4, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "ldr x5, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr x19, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
        // Deal with output multiplier.
        "ld1 {v30.4s, v31.4s}, [%[output_multiplier_ptr]]\n"
        // Load filters and add offsets.
        "add x10, %[bias_ptr], #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], x5\n"
        "ld1 {v1.8b}, [%[filter_ptr]], x5\n"
        "sshll v0.8h, v0.8b, #0\n"
        "ld1 {v2.8b}, [%[filter_ptr]], x5\n"
        "sshll v1.8h, v1.8b, #0\n"
        "ld1 {v3.8b}, [%[filter_ptr]], x5\n"
        "sshll v2.8h, v2.8b, #0\n"
        "ld1 {v4.8b}, [%[filter_ptr]], x5\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v5.8b}, [%[filter_ptr]], x5\n"
        "sshll v4.8h, v4.8b, #0\n"
        "ld1 {v6.8b}, [%[filter_ptr]], x5\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v7.8b}, [%[filter_ptr]], x5\n"
        "sshll v6.8h, v6.8b, #0\n"
        "ld1 {v8.8b}, [%[filter_ptr]]\n"
        "sshll v7.8h, v7.8b, #0\n"
        "sshll v8.8h, v8.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
        // Load the first two rows of the 5x5 input window, then reuse the
        // same registers to load subsequent rows as they become available.
        "mov x11, %[input_ptr]\n"
        "mov x12, x11\n"
        "add x13, x12, %[input_row_size]\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "mov w14, %w[output_window_width]\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        // The height 2 / width 2 loop loads an extra 1 output horizontally in
        // anticipation for the next iteration. Make sure
        // |output_window_width| is large enough to handle the additional
        // load, otherwise jump to the appropriate label to handle smaller
        // widths.
        "cmp w14, #2\n"
        "ld1 {v11.8b}, [x12], %[input_depth]\n"
        "add x15, x13, %[input_row_size]\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "mov x6, %[output_ptr]\n"
        "ld1 {v15.8b}, [x13], %[input_depth]\n"
        "add x7, %[output_ptr], x19\n"
        "ld1 {v16.8b}, [x13], %[input_depth]\n"
        "ld1 {v21.4s}, [%[bias_ptr]]\n"
        "ld1 {v22.4s}, [x10]\n"
        "ld1 {v23.4s}, [%[bias_ptr]]\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "ld1 {v24.4s}, [x10]\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "ld1 {v19.4s}, [%[bias_ptr]]\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "ld1 {v20.4s}, [x10]\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "ld1 {v25.4s}, [%[bias_ptr]]\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "ld1 {v26.4s}, [x10]\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
        "cmp w14, #1\n"
        "beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
        "smlal v21.4s, v0.4h, v9.4h\n"
        "ld1 {v12.8b}, [x12], %[input_depth]\n"
        "smlal2 v22.4s, v0.8h, v9.8h\n"
        "ld1 {v13.8b}, [x12]\n"
        "add x12, x15, %[input_row_size]\n"
        "smlal v23.4s, v0.4h, v11.4h\n"
        "ld1 {v17.8b}, [x13], %[input_depth]\n"
        "smlal2 v24.4s, v0.8h, v11.8h\n"
        "ld1 {v18.8b}, [x13]\n"
        "add x13, x12, %[input_row_size]\n"
        "smlal v21.4s, v1.4h, v10.4h\n"
        "ld1 {v9.8b}, [x15], %[input_depth]\n"
        "smlal2 v22.4s, v1.8h, v10.8h\n"
        "ld1 {v10.8b}, [x15], %[input_depth]\n"
        "smlal v21.4s, v2.4h, v11.4h\n"
        "smlal2 v22.4s, v2.8h, v11.8h\n"
        "ld1 {v11.8b}, [x15], %[input_depth]\n"
        "smlal v21.4s, v3.4h, v14.4h\n"
        "smlal2 v22.4s, v3.8h, v14.8h\n"
        "ld1 {v14.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v3.4h, v16.4h\n"
        "subs w14, w14, #2\n"
        "smlal2 v24.4s, v3.8h, v16.8h\n"
        "cmp w14, #3\n"
        "smlal v21.4s, v4.4h, v15.4h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal2 v22.4s, v4.8h, v15.8h\n"
        "ld1 {v15.8b}, [x12], %[input_depth]\n"
        "smlal v21.4s, v5.4h, v16.4h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal2 v22.4s, v5.8h, v16.8h\n"
        "ld1 {v16.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v1.4h, v12.4h\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "smlal2 v24.4s, v1.8h, v12.8h\n"
        "ld1 {v12.8b}, [x15], %[input_depth]\n"
        "smlal v23.4s, v2.4h, v13.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v24.4s, v2.8h, v13.8h\n"
        "ld1 {v13.8b}, [x15]\n"
        "smlal v23.4s, v4.4h, v17.4h\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "smlal2 v24.4s, v4.8h, v17.8h\n"
        "ld1 {v17.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v5.4h, v18.4h\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "smlal2 v24.4s, v5.8h, v18.8h\n"
        "ld1 {v18.8b}, [x12]\n"
        "smlal v21.4s, v6.4h, v9.4h\n"
        "smlal2 v22.4s, v6.8h, v9.8h\n"
        "smlal v19.4s, v0.4h, v9.4h\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "smlal2 v20.4s, v0.8h, v9.8h\n"
        "ld1 {v9.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v6.4h, v11.4h\n"
        "smlal2 v24.4s, v6.8h, v11.8h\n"
        "smlal v21.4s, v7.4h, v10.4h\n"
        "smlal2 v22.4s, v7.8h, v10.8h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal v19.4s, v1.4h, v10.4h\n"
        "smlal2 v20.4s, v1.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v7.4h, v12.4h\n"
        "smlal2 v24.4s, v7.8h, v12.8h\n"
        "smlal v25.4s, v1.4h, v12.4h\n"
        "smlal2 v26.4s, v1.8h, v12.8h\n"
        "smlal v21.4s, v8.4h, v11.4h\n"
        "smlal2 v22.4s, v8.8h, v11.8h\n"
        "add x11, x11, %[input_width_increment]\n"
        "smlal v19.4s, v2.4h, v11.4h\n"
        "mov x12, x11\n"
        "smlal2 v20.4s, v2.8h, v11.8h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal v25.4s, v0.4h, v11.4h\n"
        "smlal2 v26.4s, v0.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v8.4h, v13.4h\n"
        "ld1 {v12.8b}, [x13], %[input_depth]\n"
        "smlal2 v24.4s, v8.8h, v13.8h\n"
        "smlal v25.4s, v2.4h, v13.4h\n"
        "smlal2 v26.4s, v2.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        "add x13, x12, %[input_row_size]\n"
        "add x15, x13, %[input_row_size]\n"
        // Requantize the two top outputs: per-channel multiplier (v30/v31)
        // then per-channel shift freshly loaded into v27/v28.
        "ld1 {v27.4s, v28.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v21.4s, v21.4s, v30.4s\n"
        "sqrdmulh v22.4s, v22.4s, v31.4s\n"
        "sqrdmulh v23.4s, v23.4s, v30.4s\n"
        "sqrdmulh v24.4s, v24.4s, v31.4s\n"
        "sqrshl v21.4s, v21.4s, v27.4s\n"
        "sqrshl v22.4s, v22.4s, v28.4s\n"
        "sqrshl v23.4s, v23.4s, v27.4s\n"
        "sqrshl v24.4s, v24.4s, v28.4s\n"
        // v28 was borrowed for the shift; restore the input offset into it.
        "dup v28.8h, w0\n"
        "sqxtn v21.4h, v21.4s\n"
        "sqxtn2 v21.8h, v22.4s\n"
        "sqxtn v23.4h, v23.4s\n"
        "sqxtn2 v23.8h, v24.4s\n"
        "sqadd v21.8h, v21.8h, v29.8h\n"
        "sqadd v23.8h, v23.8h, v29.8h\n"
        "sqxtn v21.8b, v21.8h\n"
        "sqxtn2 v21.16b, v23.8h\n"
        "dup v27.16b, w3\n"
        "dup v29.16b, w4\n"
        "ld1 {v22.4s}, [x10]\n"
        "smax v21.16b, v21.16b, v27.16b\n"
        "smin v21.16b, v21.16b, v29.16b\n"
        "ld1 {v24.4s}, [x10]\n"
        // v29 was borrowed for the activation max; restore the output offset.
        "dup v29.8h, w2\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "st1 {v21.8b}, [x6], x5\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "mov v23.d[0], v21.d[1]\n"
        "st1 {v23.8b}, [x6], x5\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "smlal v19.4s, v6.4h, v9.4h\n"
        "smlal2 v20.4s, v6.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "smlal v25.4s, v6.4h, v11.4h\n"
        "smlal2 v26.4s, v6.8h, v11.8h\n"
        "smlal v19.4s, v7.4h, v10.4h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal2 v20.4s, v7.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        "smlal v25.4s, v7.4h, v12.4h\n"
        "smlal2 v26.4s, v7.8h, v12.8h\n"
        "smlal v19.4s, v8.4h, v11.4h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal2 v20.4s, v8.8h, v11.8h\n"
        "ld1 {v11.8b}, [x12], %[input_depth]\n"
        "smlal v25.4s, v8.4h, v13.4h\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "smlal2 v26.4s, v8.8h, v13.8h\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "smlal v19.4s, v3.4h, v14.4h\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "smlal2 v20.4s, v3.8h, v14.8h\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "smlal v25.4s, v3.4h, v16.4h\n"
        "ld1 {v21.4s}, [%[bias_ptr]]\n"
        "smlal2 v26.4s, v3.8h, v16.8h\n"
        "ld1 {v23.4s}, [%[bias_ptr]]\n"
        "smlal v19.4s, v4.4h, v15.4h\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "smlal2 v20.4s, v4.8h, v15.8h\n"
        "ld1 {v15.8b}, [x13], %[input_depth]\n"
        "smlal v25.4s, v4.4h, v17.4h\n"
        "smlal2 v26.4s, v4.8h, v17.8h\n"
        "smlal v19.4s, v5.4h, v16.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v20.4s, v5.8h, v16.8h\n"
        "ld1 {v16.8b}, [x13], %[input_depth]\n"
        "smlal v25.4s, v5.4h, v18.4h\n"
        "smlal2 v26.4s, v5.8h, v18.8h\n"
        // Requantize the two bottom outputs (same multiplier/shift scheme).
        "ld1 {v27.4s, v28.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v19.4s, v19.4s, v30.4s\n"
        "sqrdmulh v20.4s, v20.4s, v31.4s\n"
        "sqrdmulh v25.4s, v25.4s, v30.4s\n"
        "sqrdmulh v26.4s, v26.4s, v31.4s\n"
        "sqrshl v19.4s, v19.4s, v27.4s\n"
        "sqrshl v20.4s, v20.4s, v28.4s\n"
        "sqrshl v25.4s, v25.4s, v27.4s\n"
        "sqrshl v26.4s, v26.4s, v28.4s\n"
        "dup v28.8h, w0\n"
        "sqxtn v19.4h, v19.4s\n"
        "sqxtn2 v19.8h, v20.4s\n"
        "sqxtn v25.4h, v25.4s\n"
        "sqxtn2 v25.8h, v26.4s\n"
        "sqadd v19.8h, v19.8h, v29.8h\n"
        "sqadd v25.8h, v25.8h, v29.8h\n"
        "sqxtn v19.8b, v19.8h\n"
        "sqxtn2 v19.16b, v25.8h\n"
        "dup v27.16b, w3\n"
        "dup v29.16b, w4\n"
        "ld1 {v20.4s}, [x10]\n"
        "smax v19.16b, v19.16b, v27.16b\n"
        "smin v19.16b, v19.16b, v29.16b\n"
        "ld1 {v26.4s}, [x10]\n"
        "dup v29.8h, w2\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "st1 {v19.8b}, [x7], x5\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "mov v25.d[0], v19.d[1]\n"
        "st1 {v25.8b}, [x7], x5\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "ld1 {v19.4s}, [%[bias_ptr]]\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "ld1 {v25.4s}, [%[bias_ptr]]\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "saddw v16.8h, v28.8h, v16.8b\n"

        "bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"

        // At this point, there will be one of 2 width or 1 width leftover,
        // not both.
        "cmp w14, #2\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"

        // Handle last 2 columns if exists.
        DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
        "smlal v21.4s, v0.4h, v9.4h\n"
        "ld1 {v12.8b}, [x12], %[input_depth]\n"
        "smlal2 v22.4s, v0.8h, v9.8h\n"
        "ld1 {v13.8b}, [x12]\n"
        "add x12, x15, %[input_row_size]\n"
        "smlal v23.4s, v0.4h, v11.4h\n"
        "ld1 {v17.8b}, [x13], %[input_depth]\n"
        "smlal2 v24.4s, v0.8h, v11.8h\n"
        "ld1 {v18.8b}, [x13]\n"
        "add x13, x12, %[input_row_size]\n"
        "smlal v21.4s, v1.4h, v10.4h\n"
        "ld1 {v9.8b}, [x15], %[input_depth]\n"
        "smlal2 v22.4s, v1.8h, v10.8h\n"
        "ld1 {v10.8b}, [x15], %[input_depth]\n"
        "smlal v21.4s, v2.4h, v11.4h\n"
        "smlal2 v22.4s, v2.8h, v11.8h\n"
        "ld1 {v11.8b}, [x15], %[input_depth]\n"
        "smlal v21.4s, v3.4h, v14.4h\n"
        "smlal2 v22.4s, v3.8h, v14.8h\n"
        "ld1 {v14.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v3.4h, v16.4h\n"
        "smlal2 v24.4s, v3.8h, v16.8h\n"
        "smlal v21.4s, v4.4h, v15.4h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal2 v22.4s, v4.8h, v15.8h\n"
        "ld1 {v15.8b}, [x12], %[input_depth]\n"
        "smlal v21.4s, v5.4h, v16.4h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal2 v22.4s, v5.8h, v16.8h\n"
        "ld1 {v16.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v1.4h, v12.4h\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "smlal2 v24.4s, v1.8h, v12.8h\n"
        "ld1 {v12.8b}, [x15], %[input_depth]\n"
        "smlal v23.4s, v2.4h, v13.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v24.4s, v2.8h, v13.8h\n"
        "ld1 {v13.8b}, [x15]\n"
        "smlal v23.4s, v4.4h, v17.4h\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "smlal2 v24.4s, v4.8h, v17.8h\n"
        "ld1 {v17.8b}, [x12], %[input_depth]\n"
        "smlal v23.4s, v5.4h, v18.4h\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "smlal2 v24.4s, v5.8h, v18.8h\n"
        "ld1 {v18.8b}, [x12]\n"
        "smlal v21.4s, v6.4h, v9.4h\n"
        "smlal2 v22.4s, v6.8h, v9.8h\n"
        "smlal v19.4s, v0.4h, v9.4h\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "smlal2 v20.4s, v0.8h, v9.8h\n"
        "ld1 {v9.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v6.4h, v11.4h\n"
        "smlal2 v24.4s, v6.8h, v11.8h\n"
        "smlal v21.4s, v7.4h, v10.4h\n"
        "smlal2 v22.4s, v7.8h, v10.8h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal v19.4s, v1.4h, v10.4h\n"
        "smlal2 v20.4s, v1.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v7.4h, v12.4h\n"
        "smlal2 v24.4s, v7.8h, v12.8h\n"
        "smlal v25.4s, v1.4h, v12.4h\n"
        "smlal2 v26.4s, v1.8h, v12.8h\n"
        "smlal v21.4s, v8.4h, v11.4h\n"
        "smlal2 v22.4s, v8.8h, v11.8h\n"
        "smlal v19.4s, v2.4h, v11.4h\n"
        "smlal2 v20.4s, v2.8h, v11.8h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal v25.4s, v0.4h, v11.4h\n"
        "smlal2 v26.4s, v0.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], %[input_depth]\n"
        "smlal v23.4s, v8.4h, v13.4h\n"
        "ld1 {v12.8b}, [x13], %[input_depth]\n"
        "smlal2 v24.4s, v8.8h, v13.8h\n"
        "smlal v25.4s, v2.4h, v13.4h\n"
        "smlal2 v26.4s, v2.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        "ld1 {v27.4s, v28.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v21.4s, v21.4s, v30.4s\n"
        "sqrdmulh v22.4s, v22.4s, v31.4s\n"
        "sqrdmulh v23.4s, v23.4s, v30.4s\n"
        "sqrdmulh v24.4s, v24.4s, v31.4s\n"
        "sqrshl v21.4s, v21.4s, v27.4s\n"
        "sqrshl v22.4s, v22.4s, v28.4s\n"
        "sqrshl v23.4s, v23.4s, v27.4s\n"
        "sqrshl v24.4s, v24.4s, v28.4s\n"
        "dup v28.8h, w0\n"
        "sqxtn v21.4h, v21.4s\n"
        "sqxtn2 v21.8h, v22.4s\n"
        "sqxtn v23.4h, v23.4s\n"
        "sqxtn2 v23.8h, v24.4s\n"
        "sqadd v21.8h, v21.8h, v29.8h\n"
        "sqadd v23.8h, v23.8h, v29.8h\n"
        "sqxtn v21.8b, v21.8h\n"
        "sqxtn2 v21.16b, v23.8h\n"
        "dup v27.16b, w3\n"
        "dup v29.16b, w4\n"
        "ld1 {v22.4s}, [x10]\n"
        "smax v21.16b, v21.16b, v27.16b\n"
        "smin v21.16b, v21.16b, v29.16b\n"
        "ld1 {v24.4s}, [x10]\n"
        "dup v29.8h, w2\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "st1 {v21.8b}, [x6], x5\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "mov v23.d[0], v21.d[1]\n"
        "st1 {v23.8b}, [x6]\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "smlal v19.4s, v6.4h, v9.4h\n"
        "smlal2 v20.4s, v6.8h, v9.8h\n"
        "smlal v25.4s, v6.4h, v11.4h\n"
        "smlal2 v26.4s, v6.8h, v11.8h\n"
        "smlal v19.4s, v7.4h, v10.4h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal2 v20.4s, v7.8h, v10.8h\n"
        "smlal v25.4s, v7.4h, v12.4h\n"
        "smlal2 v26.4s, v7.8h, v12.8h\n"
        "smlal v19.4s, v8.4h, v11.4h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "smlal2 v20.4s, v8.8h, v11.8h\n"
        "smlal v25.4s, v8.4h, v13.4h\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "smlal2 v26.4s, v8.8h, v13.8h\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "smlal v19.4s, v3.4h, v14.4h\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "smlal2 v20.4s, v3.8h, v14.8h\n"
        "smlal v25.4s, v3.4h, v16.4h\n"
        "smlal2 v26.4s, v3.8h, v16.8h\n"
        "smlal v19.4s, v4.4h, v15.4h\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "smlal2 v20.4s, v4.8h, v15.8h\n"
        "smlal v25.4s, v4.4h, v17.4h\n"
        "smlal2 v26.4s, v4.8h, v17.8h\n"
        "smlal v19.4s, v5.4h, v16.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v20.4s, v5.8h, v16.8h\n"
        "smlal v25.4s, v5.4h, v18.4h\n"
        "smlal2 v26.4s, v5.8h, v18.8h\n"
        "ld1 {v27.4s, v28.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v19.4s, v19.4s, v30.4s\n"
        "sqrdmulh v20.4s, v20.4s, v31.4s\n"
        "sqrdmulh v25.4s, v25.4s, v30.4s\n"
        "sqrdmulh v26.4s, v26.4s, v31.4s\n"
        "sqrshl v19.4s, v19.4s, v27.4s\n"
        "sqrshl v20.4s, v20.4s, v28.4s\n"
        "sqrshl v25.4s, v25.4s, v27.4s\n"
        "sqrshl v26.4s, v26.4s, v28.4s\n"
        "dup v28.8h, w0\n"
        "sqxtn v19.4h, v19.4s\n"
        "sqxtn2 v19.8h, v20.4s\n"
        "sqxtn v25.4h, v25.4s\n"
        "sqxtn2 v25.8h, v26.4s\n"
        "sqadd v19.8h, v19.8h, v29.8h\n"
        "sqadd v25.8h, v25.8h, v29.8h\n"
        "dup v27.16b, w3\n"
        "dup v29.16b, w4\n"
        "sqxtn v19.8b, v19.8h\n"
        "sqxtn2 v19.16b, v25.8h\n"
        "smax v19.16b, v19.16b, v27.16b\n"
        "smin v19.16b, v19.16b, v29.16b\n"
        "st1 {v19.8b}, [x7], x5\n"
        "dup v29.8h, w2\n"
        "mov v25.d[0], v19.d[1]\n"
        "st1 {v25.8b}, [x7]\n"
        "b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"

        // Handle last column if exists.
        DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
        // Registers v9, v10, v11, v14, v15, and v16 have already been loaded
        // with the correct values at this point. This corresponds to the
        // first two input rows of the top left output. Now load the last
        // input row for this output. Once these inputs are no longer needed,
        // load the input rows for the bottom left output.
        "add x12, x15, %[input_row_size]\n"
        "add x13, x12, %[input_row_size]\n"

        "ld1 {v12.8b}, [x15], %[input_depth]\n"
        "smlal v21.4s, v0.4h, v9.4h\n"
        "ld1 {v13.8b}, [x15], %[input_depth]\n"
        "smlal2 v22.4s, v0.8h, v9.8h\n"
        "ld1 {v17.8b}, [x15]\n"
        "smlal v21.4s, v1.4h, v10.4h\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "smlal2 v22.4s, v1.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        "smlal v21.4s, v2.4h, v11.4h\n"
        "smlal2 v22.4s, v2.8h, v11.8h\n"
        "ld1 {v11.8b}, [x12]\n"
        "smlal v21.4s, v3.4h, v14.4h\n"
        "smlal2 v22.4s, v3.8h, v14.8h\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "smlal v21.4s, v4.4h, v15.4h\n"
        "smlal2 v22.4s, v4.8h, v15.8h\n"
        "ld1 {v15.8b}, [x13], %[input_depth]\n"
        "smlal v21.4s, v5.4h, v16.4h\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "smlal2 v22.4s, v5.8h, v16.8h\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "ld1 {v16.8b}, [x13]\n"

        "smlal v21.4s, v6.4h, v12.4h\n"
        "smlal2 v22.4s, v6.8h, v12.8h\n"
        "smlal v23.4s, v0.4h, v12.4h\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "smlal2 v24.4s, v0.8h, v12.8h\n"
        "smlal v21.4s, v7.4h, v13.4h\n"
        "smlal2 v22.4s, v7.8h, v13.8h\n"
        "smlal v23.4s, v1.4h, v13.4h\n"
        "smlal2 v24.4s, v1.8h, v13.8h\n"
        "smlal v21.4s, v8.4h, v17.4h\n"
        "smlal2 v22.4s, v8.8h, v17.8h\n"
        "smlal v23.4s, v2.4h, v17.4h\n"
        "smlal2 v24.4s, v2.8h, v17.8h\n"
        "ld1 {v26.4s, v27.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v21.4s, v21.4s, v30.4s\n"
        "sqrdmulh v22.4s, v22.4s, v31.4s\n"
        "sqrshl v21.4s, v21.4s, v26.4s\n"
        "sqrshl v22.4s, v22.4s, v27.4s\n"
        "sqxtn v21.4h, v21.4s\n"
        "sqxtn2 v21.8h, v22.4s\n"
        "dup v26.16b, w3\n"
        "dup v27.16b, w4\n"
        "sqadd v21.8h, v21.8h, v29.8h\n"
        "sqxtn v21.8b, v21.8h\n"
        "smax v21.8b, v21.8b, v26.8b\n"
        "smin v21.8b, v21.8b, v27.8b\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "st1 {v21.8b}, [x6]\n"
        "saddw v10.8h, v28.8h, v10.8b\n"

        "smlal v23.4s, v3.4h, v9.4h\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "smlal2 v24.4s, v3.8h, v9.8h\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "smlal v23.4s, v4.4h, v10.4h\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "smlal2 v24.4s, v4.8h, v10.8h\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "smlal v23.4s, v5.4h, v11.4h\n"
        "smlal2 v24.4s, v5.8h, v11.8h\n"

        "smlal v23.4s, v6.4h, v14.4h\n"
        "smlal2 v24.4s, v6.8h, v14.8h\n"
        "smlal v23.4s, v7.4h, v15.4h\n"
        "smlal2 v24.4s, v7.8h, v15.8h\n"
        "smlal v23.4s, v8.4h, v16.4h\n"
        "smlal2 v24.4s, v8.8h, v16.8h\n"
        "ld1 {v26.4s, v27.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v23.4s, v23.4s, v30.4s\n"
        "sqrdmulh v24.4s, v24.4s, v31.4s\n"
        "sqrshl v23.4s, v23.4s, v26.4s\n"
        "sqrshl v24.4s, v24.4s, v27.4s\n"
        "sqxtn v23.4h, v23.4s\n"
        "sqxtn2 v23.8h, v24.4s\n"
        "dup v26.16b, w3\n"
        "dup v27.16b, w4\n"
        "sqadd v23.8h, v23.8h, v29.8h\n"
        "sqxtn v23.8b, v23.8h\n"
        "smax v23.8b, v23.8b, v26.8b\n"
        "smin v23.8b, v23.8b, v27.8b\n"
        "st1 {v23.8b}, [x7]\n"

        DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
        "subs %w[output_window_height], %w[output_window_height], #2\n"
        "add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
        "cmp %w[output_window_height], #2\n"
        "add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
        "bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"

        DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
        "cmp %w[output_window_height], #1\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"

        DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
        "mov x11, %[input_ptr]\n"
        "mov x12, x11\n"
        "add x13, x12, %[input_row_size]\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "add x15, x13, %[input_row_size]\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        "mov x6, %[output_ptr]\n"
        "ld1 {v11.8b}, [x12], %[input_depth]\n"
        "mov w14, %w[output_window_width]\n"
        // The height 1 / width 2 loop loads an extra 1x1 output in anticipation
        // for the next iteration. Make sure |output_window_width| is large
        // enough to handle the additional load, otherwise jump to the
        // appropriate label to handle smaller widths.
        "cmp w14, #2\n"
        "ld1 {v12.8b}, [x13], %[input_depth]\n"
        "ld1 {v13.8b}, [x13], %[input_depth]\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "ld1 {v15.8b}, [x15], %[input_depth]\n"
        "ld1 {v16.8b}, [x15], %[input_depth]\n"
        "ld1 {v17.8b}, [x15], %[input_depth]\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "ld1 {v24.4s}, [%[bias_ptr]]\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "ld1 {v25.4s}, [x10]\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "ld1 {v26.4s}, [%[bias_ptr]]\n"
        "ld1 {v27.4s}, [x10]\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "saddw v17.8h, v28.8h, v17.8b\n"
        "beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
        "cmp w14, #1\n"
        "beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"

        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
        "smlal v24.4s, v0.4h, v9.4h\n"
        "ld1 {v18.8b}, [x12], %[input_depth]\n"
        "smlal2 v25.4s, v0.8h, v9.8h\n"
        "ld1 {v19.8b}, [x12]\n"
        "smlal v26.4s, v0.4h, v11.4h\n"
        "ld1 {v20.8b}, [x13], %[input_depth]\n"
        "smlal2 v27.4s, v0.8h, v11.8h\n"
        "ld1 {v21.8b}, [x13]\n"
        "smlal v24.4s, v1.4h, v10.4h\n"
        "ld1 {v22.8b}, [x15], %[input_depth]\n"
        "smlal2 v25.4s, v1.8h, v10.8h\n"
        "ld1 {v23.8b}, [x15]\n"
        "smlal v24.4s, v2.4h, v11.4h\n"
        "subs w14, w14, #2\n"
        "smlal2 v25.4s, v2.8h, v11.8h\n"
        "cmp w14, #3\n"
        "smlal v24.4s, v3.4h, v12.4h\n"
        "add x11, x11, %[input_width_increment]\n"
        "smlal2 v25.4s, v3.8h, v12.8h\n"
        "mov x12, x11\n"
        "smlal v26.4s, v3.4h, v14.4h\n"
        "add x13, x12, %[input_row_size]\n"
        "smlal2 v27.4s, v3.8h, v14.8h\n"
        "add x15, x13, %[input_row_size]\n"
        "smlal v24.4s, v4.4h, v13.4h\n"
        "ld1 {v9.8b}, [x12], %[input_depth]\n"
        "smlal2 v25.4s, v4.8h, v13.8h\n"
        "ld1 {v10.8b}, [x12], %[input_depth]\n"
        "smlal v24.4s, v5.4h, v14.4h\n"
        "ld1 {v11.8b}, [x12], %[input_depth]\n"
        "smlal2 v25.4s, v5.8h, v14.8h\n"
        "ld1 {v12.8b}, [x13], %[input_depth]\n"
        "smlal v24.4s, v6.4h, v15.4h\n"
        "ld1 {v13.8b}, [x13], %[input_depth]\n"
        "smlal2 v25.4s, v6.8h, v15.8h\n"
        "ld1 {v14.8b}, [x13], %[input_depth]\n"
        "smlal v26.4s, v6.4h, v17.4h\n"
        "ld1 {v15.8b}, [x15], %[input_depth]\n"
        "smlal2 v27.4s, v6.8h, v17.8h\n"
        "smlal v24.4s, v7.4h, v16.4h\n"
        "smlal2 v25.4s, v7.8h, v16.8h\n"
        "ld1 {v16.8b}, [x15], %[input_depth]\n"
        "smlal v24.4s, v8.4h, v17.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v25.4s, v8.8h, v17.8h\n"
        "ld1 {v17.8b}, [x15], %[input_depth]\n"
        "saddw v19.8h, v28.8h, v19.8b\n"
        "smlal v26.4s, v1.4h, v18.4h\n"
        "saddw v20.8h, v28.8h, v20.8b\n"
        "smlal2 v27.4s, v1.8h, v18.8h\n"
        "smlal v26.4s, v2.4h, v19.4h\n"
        "saddw v21.8h, v28.8h, v21.8b\n"
        "smlal2 v27.4s, v2.8h, v19.8h\n"
        "smlal v26.4s, v4.4h, v20.4h\n"
        "smlal v26.4s, v5.4h, v21.4h\n"
        "smlal2 v27.4s, v4.8h, v20.8h\n"
        "saddw v22.8h, v28.8h, v22.8b\n"
        "smlal2 v27.4s, v5.8h, v21.8h\n"
        "saddw v23.8h, v28.8h, v23.8b\n"
        "smlal v26.4s, v7.4h, v22.4h\n"
        "smlal2 v27.4s, v7.8h, v22.8h\n"
        "smlal v26.4s, v8.4h, v23.4h\n"
        "smlal2 v27.4s, v8.8h, v23.8h\n"
        // Here v28 & v29 are both borrowed for the per-channel output shift,
        // then restored (input offset into v28, output offset into v28
        // temporarily via w2) below.
        "ld1 {v28.4s, v29.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v24.4s, v24.4s, v30.4s\n"
        "sqrdmulh v25.4s, v25.4s, v31.4s\n"
        "sqrdmulh v26.4s, v26.4s, v30.4s\n"
        "sqrdmulh v27.4s, v27.4s, v31.4s\n"
        "sqrshl v24.4s, v24.4s, v28.4s\n"
        "sqrshl v25.4s, v25.4s, v29.4s\n"
        "sqrshl v26.4s, v26.4s, v28.4s\n"
        "sqrshl v27.4s, v27.4s, v29.4s\n"
        "dup v28.8h, w2\n"
        "sqxtn v24.4h, v24.4s\n"
        "sqxtn2 v24.8h, v25.4s\n"
        "sqxtn v26.4h, v26.4s\n"
        "sqxtn2 v26.8h, v27.4s\n"
        "sqadd v24.8h, v24.8h, v28.8h\n"
        "sqadd v26.8h, v26.8h, v28.8h\n"
        "sqxtn v24.8b, v24.8h\n"
        "sqxtn2 v24.16b, v26.8h\n"
        "dup v28.8h, w0\n"
        "dup v27.16b, w3\n"
        "dup v29.16b, w4\n"
        "ld1 {v25.4s}, [x10]\n"
        "smax v24.16b, v24.16b, v27.16b\n"
        "smin v24.16b, v24.16b, v29.16b\n"
        "saddw v9.8h, v28.8h, v9.8b\n"
        "st1 {v24.8b}, [x6], x5\n"
        "ld1 {v27.4s}, [x10]\n"
        "saddw v10.8h, v28.8h, v10.8b\n"
        "mov v26.d[0], v24.d[1]\n"
        "st1 {v26.8b}, [x6], x5\n"
        "saddw v11.8h, v28.8h, v11.8b\n"
        "saddw v12.8h, v28.8h, v12.8b\n"
        "saddw v13.8h, v28.8h, v13.8b\n"
        "saddw v14.8h, v28.8h, v14.8b\n"
        "ld1 {v24.4s}, [%[bias_ptr]]\n"
        "saddw v15.8h, v28.8h, v15.8b\n"
        "ld1 {v26.4s}, [%[bias_ptr]]\n"
        "saddw v16.8h, v28.8h, v16.8b\n"
        "saddw v17.8h, v28.8h, v17.8b\n"

        "bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"

        // At this point, there will be one of 2 width or 1 width leftover,
        // not both.
        "cmp w14, #2\n"
        "blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"

        // Handle last two horizontal outputs if exists.
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
        "smlal v24.4s, v0.4h, v9.4h\n"
        "ld1 {v18.8b}, [x12], %[input_depth]\n"
        "smlal2 v25.4s, v0.8h, v9.8h\n"
        "ld1 {v19.8b}, [x12]\n"
        "smlal v26.4s, v0.4h, v11.4h\n"
        "ld1 {v20.8b}, [x13], %[input_depth]\n"
        "smlal2 v27.4s, v0.8h, v11.8h\n"
        "ld1 {v21.8b}, [x13]\n"
        "smlal v24.4s, v1.4h, v10.4h\n"
        "ld1 {v22.8b}, [x15], %[input_depth]\n"
        "smlal2 v25.4s, v1.8h, v10.8h\n"
        "ld1 {v23.8b}, [x15]\n"
        "smlal v24.4s, v2.4h, v11.4h\n"
        "smlal2 v25.4s, v2.8h, v11.8h\n"
        "smlal v24.4s, v3.4h, v12.4h\n"
        "smlal2 v25.4s, v3.8h, v12.8h\n"
        "smlal v26.4s, v3.4h, v14.4h\n"
        "smlal2 v27.4s, v3.8h, v14.8h\n"
        "smlal v24.4s, v4.4h, v13.4h\n"
        "smlal2 v25.4s, v4.8h, v13.8h\n"
        "smlal v24.4s, v5.4h, v14.4h\n"
        "smlal2 v25.4s, v5.8h, v14.8h\n"
        "smlal v24.4s, v6.4h, v15.4h\n"
        "smlal2 v25.4s, v6.8h, v15.8h\n"
        "smlal v26.4s, v6.4h, v17.4h\n"
        "smlal2 v27.4s, v6.8h, v17.8h\n"
        "smlal v24.4s, v7.4h, v16.4h\n"
        "smlal2 v25.4s, v7.8h, v16.8h\n"
        "smlal v24.4s, v8.4h, v17.4h\n"
        "saddw v18.8h, v28.8h, v18.8b\n"
        "smlal2 v25.4s, v8.8h, v17.8h\n"
        "saddw v19.8h, v28.8h, v19.8b\n"
        "smlal v26.4s, v1.4h, v18.4h\n"
        "saddw v20.8h, v28.8h, v20.8b\n"
        "smlal2 v27.4s, v1.8h, v18.8h\n"
        "smlal v26.4s, v2.4h, v19.4h\n"
        "saddw v21.8h, v28.8h, v21.8b\n"
        "smlal2 v27.4s, v2.8h, v19.8h\n"
        "smlal v26.4s, v4.4h, v20.4h\n"
        "smlal v26.4s, v5.4h, v21.4h\n"
        "smlal2 v27.4s, v4.8h, v20.8h\n"
        "saddw v22.8h, v28.8h, v22.8b\n"
        "smlal2 v27.4s, v5.8h, v21.8h\n"
        "saddw v23.8h, v28.8h, v23.8b\n"
        "smlal v26.4s, v7.4h, v22.4h\n"
        "smlal2 v27.4s, v7.8h, v22.8h\n"
        "smlal v26.4s, v8.4h, v23.4h\n"
        "smlal2 v27.4s, v8.8h, v23.8h\n"
        "ld1 {v28.4s, v29.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v24.4s, v24.4s, v30.4s\n"
        "sqrdmulh v25.4s, v25.4s, v31.4s\n"
        "sqrdmulh v26.4s, v26.4s, v30.4s\n"
        "sqrdmulh v27.4s, v27.4s, v31.4s\n"
        "sqrshl v24.4s, v24.4s, v28.4s\n"
        "sqrshl v25.4s, v25.4s, v29.4s\n"
        "sqrshl v26.4s, v26.4s, v28.4s\n"
        "sqrshl v27.4s, v27.4s, v29.4s\n"
        "dup v28.8h, w2\n"
        "sqxtn v24.4h, v24.4s\n"
        "sqxtn2 v24.8h, v25.4s\n"
        "sqxtn v26.4h, v26.4s\n"
        "sqxtn2 v26.8h, v27.4s\n"
        "sqadd v24.8h, v24.8h, v28.8h\n"
        "sqadd v26.8h, v26.8h, v28.8h\n"
        "sqxtn v24.8b, v24.8h\n"
        "dup v28.16b, w3\n"
        "dup v29.16b, w4\n"
        "sqxtn2 v24.16b, v26.8h\n"
        "smax v24.16b, v24.16b, v28.16b\n"
        "smin v24.16b, v24.16b, v29.16b\n"
        "st1 {v24.8b}, [x6], x5\n"
        "mov v26.d[0], v24.d[1]\n"
        "st1 {v26.8b}, [x6]\n"
        "dup v28.8h, w0\n"
        "b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"

        // Handle bottom right output if exists.
        DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
        "dup v29.8h, w2\n"
        "smlal v24.4s, v0.4h, v9.4h\n"
        "smlal2 v25.4s, v0.8h, v9.8h\n"
        "smlal v24.4s, v1.4h, v10.4h\n"
        "smlal2 v25.4s, v1.8h, v10.8h\n"
        "smlal v24.4s, v2.4h, v11.4h\n"
        "smlal2 v25.4s, v2.8h, v11.8h\n"
        "smlal v24.4s, v3.4h, v12.4h\n"
        "smlal2 v25.4s, v3.8h, v12.8h\n"
        "smlal v24.4s, v4.4h, v13.4h\n"
        "smlal2 v25.4s, v4.8h, v13.8h\n"
        "smlal v24.4s, v5.4h, v14.4h\n"
        "smlal2 v25.4s, v5.8h, v14.8h\n"
        "smlal v24.4s, v6.4h, v15.4h\n"
        "smlal2 v25.4s, v6.8h, v15.8h\n"
        "smlal v24.4s, v7.4h, v16.4h\n"
        "smlal2 v25.4s, v7.8h, v16.8h\n"
        "smlal v24.4s, v8.4h, v17.4h\n"
        "smlal2 v25.4s, v8.8h, v17.8h\n"
        "ld1 {v26.4s, v27.4s}, [%[output_shift_ptr]]\n"
        "sqrdmulh v24.4s, v24.4s, v30.4s\n"
        "sqrdmulh v25.4s, v25.4s, v31.4s\n"
        "sqrshl v24.4s, v24.4s, v26.4s\n"
        "sqrshl v25.4s, v25.4s, v27.4s\n"
        "sqxtn v24.4h, v24.4s\n"
        "sqxtn2 v24.8h, v25.4s\n"
        "dup v26.16b, w3\n"
        "dup v27.16b, w4\n"
        "sqadd v24.8h, v24.8h, v29.8h\n"
        "sqxtn v24.8b, v24.8h\n"
        "smax v24.8b, v24.8b, v26.8b\n"
        "smin v24.8b, v24.8b, v27.8b\n"
        "st1 {v24.8b}, [x6]\n"

        DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr),
        [output_window_height] "+r"(output_window_height)
        :
        // Inputs.
        [output_multiplier_ptr] "r"(output_multiplier_ptr),
        [output_shift_ptr] "r"(output_shift_ptr),
        [bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
        [input_depth] "r"(input_depth),
        [output_window_width] "r"(output_window_width),
        [input_width_increment] "r"(input_width_increment),
        [input_height_increment] "r"(input_height_increment),
        [output_height_increment] "r"(output_height_increment),
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
        "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
        "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
        "v30", "v31",
        // We use these general-purpose registers.
        "x0", "x2", "x3", "x4", "x5", "x6", "x7",
        "x10", "x11", "x12", "x13", "x14", "x15",
        "x19", "x20");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
  }
};
template <>
struct DepthwiseConvPartialPerChannel<DepthwiseConvOutputRounding::kUpward,
                                      EdgeType::kCenter, 1, 1> {
  // Edge kernel for the degenerate 1x1-input case with padding 1: only the
  // center tap of the 3x3 filter overlaps the input, so each output channel
  // is bias + center_filter * input, requantized per channel.
  //
  // |output_multiplier_ptr| / |output_shift_ptr|: per-channel requantization
  //     parameters, consumed 8 channels at a time.
  // |input_ptr| / |filter_ptr|: the 1x1 input and the center filter tap.
  // |bias_ptr|: per-channel int32 accumulator initializers.
  // |output_ptr|: int8 destination, written 8 channels at a time.
  // |params_ptr|: read-only conv params (offsets, depth, activation clamps).
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
    // The per-channel path assumes symmetric filter quantization (offset 0).
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 1x1 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the 1x1 input and filter values.
        //
        // Use v6-v7 to hold output_multiplier & v10-v11 to hold output_shift.
        // Broadcast quantization constants into NEON registers.
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "dup v26.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "cmp x11, #16\n"
        "dup v28.8h, w9\n"
        "ldr w9, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w10, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w9\n"
        "dup v31.16b, w10\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        // Widen input to 16 bits while adding the input offset; widen filter.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "sshll v0.8h, v0.8b, #0\n"
        // Loads output_multiplier & output_shift.
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v10.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v11.4s}, [%[output_shift_ptr]], #16\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        // Main loop: while at least 16 channels remain, process 8 and prefetch
        // the data for the next 8.
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x11, x11, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x11, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        // Requantize: saturating fixed-point multiply, per-channel rounding
        // shift, narrow to 16 then 8 bits, add output offset, clamp to the
        // activation range.
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "sqrshl v16.4s, v16.4s, v10.4s\n"
        "sqrshl v17.4s, v17.4s, v11.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "sshll v0.8h, v0.8b, #0\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v10.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v11.4s}, [%[output_shift_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        // Epilogue: the final (already loaded) 8 channels.
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "sqrshl v16.4s, v16.4s, v10.4s\n"
        "sqrshl v17.4s, v17.4s, v11.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [output_multiplier_ptr] "+r"(output_multiplier_ptr),
        [output_shift_ptr] "+r"(output_shift_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v6", "v7", "v8", "v10", "v11", "v16", "v17", "v18", "v19",
        "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x9", "x10", "x11");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
template <>
struct DepthwiseConvPartialPerChannel<DepthwiseConvOutputRounding::kUpward,
                                      EdgeType::kCorner, 1, 1> {
  // Edge kernel for a corner output pixel with padding 1: a 2x2 window of the
  // input overlaps a 2x2 sub-block of the 3x3 filter. Accumulates the four
  // taps per channel and requantizes with per-channel multiplier/shift.
  //
  // |input_ptr| / |filter_ptr|: point to the top-left of the 2x2 input patch
  //     and the matching 2x2 filter sub-block.
  // Remaining parameters are as in the kCenter specialization.
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
    // The per-channel path assumes symmetric filter quantization (offset 0).
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x2 input and
        // filter values.
        //
        // Use v4-v5 to hold output_multiplier & v6-v7 to hold output_shift.
        // Load input and filter values.
        // x12-x14 walk the three other corners of the 2x2 input patch;
        // x9-x11 walk the corresponding filter taps.
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr x9, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "cmp x15, #16\n"
        "add x12, %[input_ptr], x15\n"
        "add x13, %[input_ptr], x9\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "add x14, x13, x15\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "ldr x6, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x9, %[filter_ptr], x15\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "add x10, %[filter_ptr], x6\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "add x11, x10, x15\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Load constants.
        "ldr w6, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v28.8h, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.16b, w6\n"
        "dup v31.16b, w7\n"
        // Loads output_multiplier & output_shift.
        "ld1 {v4.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v6.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v5.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_shift_ptr]], #16\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        // Main loop: accumulate the four taps for 8 channels while loading the
        // next 8 channels' inputs/filters, then requantize and store.
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x15, x15, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x15, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        "sqrdmulh v16.4s, v16.4s, v4.4s\n"
        "sqrdmulh v17.4s, v17.4s, v5.4s\n"
        "sqrshl v16.4s, v16.4s, v6.4s\n"
        "sqrshl v17.4s, v17.4s, v7.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v4.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v6.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v5.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_shift_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        // Epilogue: the final (already loaded) 8 channels.
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "sqrdmulh v16.4s, v16.4s, v4.4s\n"
        "sqrdmulh v17.4s, v17.4s, v5.4s\n"
        "sqrshl v16.4s, v16.4s, v6.4s\n"
        "sqrshl v17.4s, v17.4s, v7.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [output_multiplier_ptr] "+r"(output_multiplier_ptr),
        [output_shift_ptr] "+r"(output_shift_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v16", "v17","v18", "v19", "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
template <>
struct DepthwiseConvPartialPerChannel<DepthwiseConvOutputRounding::kUpward,
                                      EdgeType::kHorizontal, 1, 1> {
  // Edge kernel for a non-corner output pixel on the top/bottom border with
  // padding 1: a 2x3 window of the input overlaps a 2x3 sub-block of the 3x3
  // filter (six taps per channel), requantized with per-channel
  // multiplier/shift.
  //
  // |input_ptr| / |filter_ptr|: point to the top-left of the 2x3 input patch
  //     and the matching 2x3 filter sub-block.
  // Remaining parameters are as in the kCenter specialization.
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
    // The per-channel path assumes symmetric filter quantization (offset 0).
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x3 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x3 input and
        // filter values.
        //
        // Use v6-v7 to hold output_multiplier & v14-v15 to hold output_shift.
        // Load input and filter values.
        // x12/x13 walk the two input rows (stepping x7 = input_depth per
        // column); x9/x10 walk the two filter rows.
        "ldr x7, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x9, %[filter_ptr]\n"
        "ldr x14, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "add x10, x9, x14\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "cmp x15, #16\n"
        "ld1 {v10.8b}, [x12]\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "ld1 {v13.8b}, [x13]\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "ld1 {v2.8b}, [x9]\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "dup v31.8b, w13\n"
        // Loads output_multiplier & output_shift.
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v14.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v15.4s}, [%[output_shift_ptr]], #16\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        // Main loop: accumulate six taps for 8 channels, interleaved with
        // loads of the next 8 channels, then requantize and store.
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x9, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x10, x9, x14\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12]\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9]\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "sqrshl v16.4s, v16.4s, v14.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "sqrshl v17.4s, v17.4s, v15.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "saddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v14.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v15.4s}, [%[output_shift_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        // Epilogue: the final (already loaded) 8 channels.
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "sqrshl v16.4s, v16.4s, v14.4s\n"
        "sqrshl v17.4s, v17.4s, v15.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [output_multiplier_ptr] "+r"(output_multiplier_ptr),
        [output_shift_ptr] "+r"(output_shift_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
        "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
template <>
struct DepthwiseConvPartialPerChannel<DepthwiseConvOutputRounding::kUpward,
                                      EdgeType::kVertical, 1, 1> {
  // Edge kernel for a non-corner output pixel on the left/right border with
  // padding 1: a 3x2 window of the input overlaps a 3x2 sub-block of the 3x3
  // filter (six taps per channel), requantized with per-channel
  // multiplier/shift.
  //
  // |input_ptr| / |filter_ptr|: point to the top-left of the 3x2 input patch
  //     and the matching 3x2 filter sub-block.
  // Remaining parameters are as in the kCenter specialization.
  static inline void Run(const int32* output_multiplier_ptr,
                         const int32* output_shift_ptr, const int8* input_ptr,
                         const int8* filter_ptr, const int32* bias_ptr,
                         int8* output_ptr,
                         const DepthwiseConvParams* params_ptr) {
    // The per-channel path assumes symmetric filter quantization (offset 0).
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 3x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 3x2 input and
        // filter values.
        //
        // Use v6-v7 to hold output_multiplier & v14-v15 to hold output_shift.
        // Load input and filter values.
        // x12-x14 walk the three input rows (x6 = input_depth per column);
        // x7/x9/x10 walk the three filter rows (x5 = filter_row_size).
        "ldr x6, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x7, %[filter_ptr]\n"
        "ldr x5, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "add x14, x13, x11\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "add x9, x7, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "cmp x15, #16\n"
        "add x10, x9, x5\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13]\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "ld1 {v13.8b}, [x14]\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "ld1 {v1.8b}, [x7]\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "ld1 {v3.8b}, [x9]\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_OFFSET) "]\n"
        "dup v28.8h, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.8b, w12\n"
        "dup v31.8b, w13\n"
        // Loads output_multiplier & output_shift.
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v14.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v15.4s}, [%[output_shift_ptr]], #16\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        // Main loop: accumulate six taps for 8 channels, interleaved with
        // loads of the next 8 channels, then requantize and store.
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add x14, x13, x11\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x7, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x9, x7, x5\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "add x10, x9, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13]\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x7]\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x14]\n"
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "ld1 {v3.8b}, [x9]\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "sqrshl v16.4s, v16.4s, v14.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "sqrshl v17.4s, v17.4s, v15.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "saddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.8b}, [%[output_ptr]], #8\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "ld1 {v16.4s}, [%[bias_ptr]], #16\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v17.4s}, [%[bias_ptr]], #16\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v6.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v14.4s}, [%[output_shift_ptr]], #16\n"
        "ld1 {v7.4s}, [%[output_multiplier_ptr]], #16\n"
        "ld1 {v15.4s}, [%[output_shift_ptr]], #16\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        // Epilogue: the final (already loaded) 8 channels.
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "sqrdmulh v16.4s, v16.4s, v6.4s\n"
        "sqrdmulh v17.4s, v17.4s, v7.4s\n"
        "sqrshl v16.4s, v16.4s, v14.4s\n"
        "sqrshl v17.4s, v17.4s, v15.4s\n"
        "sqxtn v16.4h, v16.4s\n"
        "sqxtn2 v16.8h, v17.4s\n"
        "sqadd v16.8h, v16.8h, v28.8h\n"
        "sqxtn v16.8b, v16.8h\n"
        // TODO(b/129852264): Improve testing coverage.
        "smax v16.8b, v16.8b, v30.8b\n"
        "smin v16.8b, v16.8b, v31.8b\n"
        "st1 {v16.8b}, [%[output_ptr]]\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [output_multiplier_ptr] "+r"(output_multiplier_ptr),
        [output_shift_ptr] "+r"(output_shift_ptr)
        :
        // Inputs.
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
        "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
#undef OFFSET_INPUT_DEPTH
#undef OFFSET_INPUT_ROW_SIZE
#undef OFFSET_OUTPUT_DEPTH
#undef OFFSET_OUTPUT_ROW_SIZE
#undef OFFSET_INPUT_OFFSET
#undef OFFSET_OUTPUT_OFFSET
#undef OFFSET_OUTPUT_MULTIPLIER
#undef OFFSET_OUTPUT_ACTIVATION_MIN
#undef OFFSET_OUTPUT_ACTIVATION_MAX
#undef OFFSET_OUTPUT_RIGHT_SHIFT
#undef OFFSET_INPUT_WIDTH
#undef OFFSET_INPUT_HEIGHT
#undef OFFSET_OUTPUT_WIDTH
#undef OFFSET_OUTPUT_HEIGHT
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvThroughDepthPerChannel {
  // Runs the DepthwiseConvWindowPerChannel kernels through the depth
  // dimension from |start_depth| to |end_depth|, 8 channels at a time. Kept
  // out-of-line (noinline) to maintain a small binary size; the read-only
  // parameters travel in a DepthwiseConvParams struct to minimize call
  // overhead.
  static void __attribute__((noinline))
  Run(const int32* output_multiplier_ptr, const int32* output_shift_ptr,
      const int8* input_ptr, const int8* filter_ptr, const int32* bias_ptr,
      int8* output_ptr, int64_t start_depth, int64_t end_depth,
      int64_t input_depth, int64_t input_row_size, int32 output_window_height,
      int32 output_window_width, const DepthwiseConvParams& params) {
    using WindowKernel =
        DepthwiseConvWindowPerChannel<output_rounding, 8, kStrideWidth,
                                      kStrideHeight>;
    // Each window-kernel invocation consumes 8 channels of every per-channel
    // stream; any leftover depth (< 8 channels) is left for the caller.
    constexpr int64_t kDepthStep = 8;
    int64_t depth = start_depth;
    while (depth + kDepthStep <= end_depth) {
      WindowKernel::Run(output_multiplier_ptr, output_shift_ptr, input_ptr,
                        filter_ptr, bias_ptr, output_ptr, input_depth,
                        input_row_size, output_window_height,
                        output_window_width, &params);
      input_ptr += kDepthStep;
      output_ptr += kDepthStep;
      filter_ptr += kDepthStep;
      bias_ptr += kDepthStep;
      output_multiplier_ptr += kDepthStep;
      output_shift_ptr += kDepthStep;
      depth += kDepthStep;
    }
  }
};
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvMultiRowPerChannel {
  using ConvKernel =
      DepthwiseConvThroughDepthPerChannel<output_rounding, kStrideWidth,
                                          kStrideHeight>;
  // Computes a horizontal strip of output pixels [start_x, end_x) for
  // shuffle_params.output_height output rows, walking the full depth for each
  // x-position. For large depth/width, input is prefetched and shuffled into
  // |shuffle_workspace| (64 channels per bucket) before running the kernels.
  //
  // |output_multiplier| / |output_shift|: per-channel requantization params.
  # NOTE: see clarify below -- this line intentionally kept as C++ comment.
  static inline void Run(const int32* output_multiplier,
                         const int32* output_shift, const int8* input_data,
                         int32 start_x, int32 end_x, const int8* filter_data,
                         const int32* bias_data, int8* output_data,
                         const DepthwiseConvParams& params,
                         const ShuffleParams& shuffle_params,
                         int8* shuffle_workspace) {
    // The shuffle buffer must fit one 64-deep bucket of the shuffled window.
    TFLITE_DCHECK(
        shuffle_params.input_height ==
        get_shuffle_input_size(kStrideHeight, shuffle_params.output_height))
    TFLITE_DCHECK(
        shuffle_params.input_width ==
        get_shuffle_input_size(kStrideWidth, shuffle_params.output_width));
    TFLITE_DCHECK_LE(
        64 * shuffle_params.input_width * shuffle_params.input_height,
        kDepthwiseConvScratchWorkspaceSize);
    int32 out_x = start_x;
    // Run shuffling on inputs with sufficiently large depth and width. When
    // these parameters are large enough, more time is taken to load inputs
    // from memory. At this point, it becomes useful to prefetch and
    // preshuffle the input data to maximize locality.
    if (params.output_depth > 64 ||
        (params.output_depth <= 64 && params.input_width > 150)) {
      for (; out_x <= (end_x - shuffle_params.output_width);
           out_x += shuffle_params.output_width) {
        const int8* input_ptr = input_data;
        const int32* bias_ptr = bias_data;
        const int32* output_multiplier_ptr = output_multiplier;
        const int32* output_shift_ptr = output_shift;
        const int8* filter_ptr = filter_data;
        int8* output_ptr = output_data;
        int64_t depth = 0;
        const int64_t shuffle_row_size = 64 * shuffle_params.input_width;
        // Process the depth dimension in 64-channel buckets, shuffling each
        // bucket into the scratch workspace first.
        for (; depth <= params.output_depth - 64; depth += 64) {
          // Preload.
          const int8* h_ptr = input_ptr;
          for (int32 i = 0; i < shuffle_params.input_height; i++) {
            const int8* ptr = h_ptr;
            for (int32 j = 0; j < shuffle_params.input_width; j++) {
              optimized_ops_preload_l1_keep(ptr);
              ptr += params.input_depth;
            }
            h_ptr += params.input_row_size;
          }
          // For a large enough input, shuffle into buckets.
          ShuffleInput(input_ptr, params.input_depth, params.input_width,
                       params.input_height, 64, shuffle_params.input_width,
                       shuffle_params.input_height, shuffle_workspace);
          ConvKernel::Run(output_multiplier_ptr, output_shift_ptr,
                          shuffle_workspace, filter_ptr, bias_ptr, output_ptr,
                          0, 64, 64, shuffle_row_size,
                          shuffle_params.output_height,
                          shuffle_params.output_width, params);
          input_ptr += 64;
          output_ptr += 64;
          filter_ptr += 64;
          bias_ptr += 64;
          output_multiplier_ptr += 64;
          output_shift_ptr += 64;
        }
        // Preload.
        const int8* h_ptr = input_ptr;
        for (int32 i = 0; i < shuffle_params.input_height; i++) {
          const int8* ptr = h_ptr;
          for (int32 j = 0; j < shuffle_params.input_width; j++) {
            optimized_ops_preload_l1_keep(ptr);
            ptr += params.input_depth;
          }
          h_ptr += params.input_row_size;
        }
        // Handle leftover depth (< 64 channels) directly from the unshuffled
        // input.
        ConvKernel::Run(output_multiplier_ptr, output_shift_ptr, input_ptr,
                        filter_ptr, bias_ptr, output_ptr, depth,
                        params.output_depth, params.input_depth,
                        params.input_row_size, shuffle_params.output_height,
                        shuffle_params.output_width, params);
        input_data +=
            shuffle_params.output_width * kStrideWidth * params.input_depth;
        output_data += shuffle_params.output_width * params.output_depth;
      }
    }
    // Handle any remaining x-positions (or the whole strip when shuffling was
    // not engaged) without shuffling.
    const int32 output_leftover_width = end_x - out_x;
    if (output_leftover_width > 0) {
      ConvKernel::Run(output_multiplier, output_shift, input_data, filter_data,
                      bias_data, output_data, 0, params.output_depth,
                      params.input_depth, params.input_row_size,
                      shuffle_params.output_height, output_leftover_width,
                      params);
    }
  }
};
// Processes the borders of the input for pad_width and pad_height = 1.
// Calls 4 asm kernels:
// * 1x1 input shape.
// * Corner edges.
// * Horizontal edges.
// * Vertical edges.
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConvHandlePaddingPerChannel(
    const int32* output_multiplier_ptr, const int32* output_shift_ptr,
    const int8* input_data, const int8* filter_data, const int32* bias_data,
    int8* output_data, const DepthwiseConvParams& params) {
  // Special case: a 1x1 input with padding 1 has a single output pixel that
  // only sees the center filter tap.
  if (params.input_width == 1 && params.input_height == 1) {
    const int8* filter_ptr =
        filter_data + params.filter_row_size + params.output_depth;
    DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kCenter, 1,
                                   1>::Run(output_multiplier_ptr,
                                           output_shift_ptr, input_data,
                                           filter_ptr, bias_data, output_data,
                                           &params);
    return;
  }
  const int32 out_x_start_corner = 0;
  const int32 out_x_end_corner = params.output_width - 1;
  const int32 out_y_start_corner = 0;
  const int32 out_y_end_corner = params.output_height - 1;
  // Handle top row.
  // Top-left corner sees the bottom-right 2x2 of the filter, i.e. filter
  // offset by one row plus one column (output_depth channels per column).
  const int8* input_ptr = input_data;
  const int8* filter_ptr =
      filter_data + params.filter_row_size + params.output_depth;
  int8* output_ptr = output_data;
  DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      output_multiplier_ptr, output_shift_ptr, input_ptr, filter_ptr, bias_data,
      output_ptr, &params);
  // For stride 2 the first interior output starts one input column in.
  input_ptr += (params.stride_width - 1) * params.input_depth;
  // Interior top-row pixels see the bottom 2x3 of the filter.
  filter_ptr = filter_data + params.filter_row_size;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kHorizontal, 1,
                                   1>::Run(output_multiplier_ptr,
                                           output_shift_ptr, input_ptr,
                                           filter_ptr, bias_data, output_ptr,
                                           &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Top-right corner.
  DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      output_multiplier_ptr, output_shift_ptr, input_ptr, filter_ptr, bias_data,
      output_ptr, &params);
  // Handle left side.
  // Left-edge pixels see the right 3x2 of the filter (skip first column).
  input_ptr = input_data + (params.stride_width - 1) * params.input_row_size;
  filter_ptr = filter_data + params.input_depth;
  output_ptr = output_data + params.output_row_size;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kVertical, 1,
                                   1>::Run(output_multiplier_ptr,
                                           output_shift_ptr, input_ptr,
                                           filter_ptr, bias_data, output_ptr,
                                           &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle right side.
  // Right-edge pixels see the left 3x2 of the filter; input starts two
  // columns from the right edge.
  input_ptr = input_data + (params.input_width - 2) * params.input_depth +
              (params.stride_width - 1) * params.input_row_size;
  filter_ptr = filter_data;
  output_ptr = output_data + params.output_row_size +
               (params.output_width - 1) * params.output_depth;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kVertical, 1,
                                   1>::Run(output_multiplier_ptr,
                                           output_shift_ptr, input_ptr,
                                           filter_ptr, bias_data, output_ptr,
                                           &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle bottom row.
  // Bottom-left corner sees the top-right 2x2 of the filter.
  input_ptr = input_data + (params.input_height - 2) * params.input_row_size;
  filter_ptr = filter_data + params.output_depth;
  output_ptr =
      output_data + (params.output_height - 1) * params.output_row_size;
  DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      output_multiplier_ptr, output_shift_ptr, input_ptr, filter_ptr, bias_data,
      output_ptr, &params);
  input_ptr += (params.stride_width == 1) ? 0 : params.input_depth;
  // Interior bottom-row pixels see the top 2x3 of the filter.
  filter_ptr = filter_data;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kHorizontal, 1,
                                   1>::Run(output_multiplier_ptr,
                                           output_shift_ptr, input_ptr,
                                           filter_ptr, bias_data, output_ptr,
                                           &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Bottom-right corner.
  DepthwiseConvPartialPerChannel<output_rounding, EdgeType::kCorner, 1, 1>::Run(
      output_multiplier_ptr, output_shift_ptr, input_ptr, filter_ptr, bias_data,
      output_ptr, &params);
}
// Per-channel quantized depthwise convolution specialized for 3x3 filters
// (int8 data, per-channel int32 output multipliers/shifts). Input rows are
// shuffled into a stack workspace and dispatched to multi-row kernels,
// preferring 8-, 4-, then 2-row variants depending on input width.
// Constraints (DCHECKed below): depth_multiplier == 1, 3x3 filter, equal
// strides of 1 or 2, and equal padding of 0 or 1.
// The half-open range [thread_start, thread_end) selects this call's slice
// of work: batches when thread_dim == 0, output rows when thread_dim == 1.
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConv3x3FilterPerChannel(
    const DepthwiseParams& rt_params, const int32* output_multiplier_ptr,
    const int32* output_shift_ptr, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int8* output_data, int thread_start, int thread_end, int thread_dim) {
  // Copy runtime parameters into the POD struct consumed by the asm kernels.
  DepthwiseConvParams params;
  const int32 stride_width = rt_params.stride_width;
  const int32 stride_height = rt_params.stride_height;
  const int32 pad_width = rt_params.padding_values.width;
  const int32 pad_height = rt_params.padding_values.height;
  const int32 depth_multiplier = rt_params.depth_multiplier;
  const int32 output_activation_min = rt_params.quantized_activation_min;
  const int32 output_activation_max = rt_params.quantized_activation_max;
  const int32 input_offset = rt_params.input_offset;
  const int32 filter_offset = rt_params.weights_offset;
  const int32 output_offset = rt_params.output_offset;
  params.input_depth = input_shape.Dims(3);
  params.input_width = input_shape.Dims(2);
  params.input_height = input_shape.Dims(1);
  params.input_row_size = params.input_depth * params.input_width;
  params.input_offset = input_offset;
  params.stride_width = stride_width;
  params.stride_height = stride_height;
  params.output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  params.output_width = output_shape.Dims(2);
  params.output_height = output_shape.Dims(1);
  params.output_row_size = params.output_depth * params.output_width;
  params.output_offset = output_offset;
  params.filter_offset = filter_offset;
  params.output_activation_min = output_activation_min;
  params.output_activation_max = output_activation_max;
  const int32 filter_height = filter_shape.Dims(1);
  const int32 filter_width = filter_shape.Dims(2);
  params.filter_row_size = params.output_depth * filter_width;
  // Algorithm assumes below constraints. It is optimized for depth
  // multiplier of 1, 3x3 filter, no padding and strides 1 and 2.
  TFLITE_DCHECK(params.output_depth == params.input_depth * depth_multiplier);
  TFLITE_DCHECK(depth_multiplier == 1);
  TFLITE_DCHECK(filter_height == 3);
  TFLITE_DCHECK(filter_width == 3);
  TFLITE_DCHECK(stride_height == 1 || stride_height == 2);
  TFLITE_DCHECK(stride_width == 1 || stride_width == 2);
  TFLITE_DCHECK(stride_width == stride_height);
  TFLITE_DCHECK(pad_height == 0 || pad_height == 1);
  TFLITE_DCHECK(pad_width == 0 || pad_width == 1);
  TFLITE_DCHECK(pad_width == pad_height);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  const int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int64_t input_batch_size = params.input_row_size * params.input_height;
  const int64_t output_batch_size =
      params.output_row_size * params.output_height;
  // Shuffle geometry (output width/rows handled per shuffle, and strides)
  // for each multi-row kernel variant; the widths differ per stride.
  ShuffleParams one_row_shuffle_params, two_row_shuffle_params,
      four_row_shuffle_params, eight_row_shuffle_params;
  if (stride_width == 1) {
    one_row_shuffle_params = ShuffleParams(30, 1, 1, 1);
    two_row_shuffle_params = ShuffleParams(22, 2, 1, 1);
    four_row_shuffle_params = ShuffleParams(14, 4, 1, 1);
    eight_row_shuffle_params = ShuffleParams(8, 8, 1, 1);
  } else {
    one_row_shuffle_params = ShuffleParams(14, 1, 2, 2);
    two_row_shuffle_params = ShuffleParams(8, 2, 2, 2);
    four_row_shuffle_params = ShuffleParams(4, 4, 2, 2);
    eight_row_shuffle_params = ShuffleParams(2, 8, 2, 2);
  }
  // Select the multi-row kernel matching the (compile-time) stride.
  using conv_multirow_func_t =
      decltype(&DepthwiseConvMultiRowPerChannel<output_rounding, 1, 1>::Run);
  conv_multirow_func_t conv_multirow_func =
      DepthwiseConvMultiRowPerChannel<output_rounding, 1, 1>::Run;
  if (stride_width == 2) {
    conv_multirow_func =
        DepthwiseConvMultiRowPerChannel<output_rounding, 2, 2>::Run;
  }
  // Allocate maximum memory needed for shuffled input.
  // TODO(mariewhite): The size of this workspace is small enough to be
  // allocated on the stack. Eventually we will want to move it to the heap
  // and have it allocated outside of this function, like the im2col_array
  // used in gemmlowp.
  int8 shuffle_workspace[kDepthwiseConvScratchWorkspaceSize];
  // Restrict the batch/row extents to this thread's slice.
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = params.output_height;
  switch (thread_dim) {
    case 0:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      break;
    case 1:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, params.output_height);
      row_start = thread_start;
      row_end = thread_end;
      break;
  }
  for (int32 b = batch_start; b < batch_end; ++b) {
    // input_ptr and output_ptr point to the start of each batch
    const int8* input_ptr = input_data + b * input_batch_size;
    int8* output_ptr = output_data + b * output_batch_size;
    int32 out_x = 0;
    int32 out_y = row_start;
    int32 end_x = params.output_width;
    int32 end_y = row_end;
    if (pad_width == 1 && pad_height == 1) {
      DepthwiseConvHandlePaddingPerChannel<output_rounding>(
          output_multiplier_ptr, output_shift_ptr, input_ptr, filter_data,
          bias_data, output_ptr, params);
      // Update extents now that the edges have been handled.
      out_x = 1;
      end_x = params.output_width - 1;
      out_y = std::max(1, out_y);
      end_y = std::min(params.output_height - 1, end_y);
    }
    // pad_width and pad_height can both be 0 or 1, depending on padding
    // option, such as Padding_VALID / Padding_SAME.
    const int in_x = (out_x * stride_width) - pad_width;
    const int in_y = (out_y * stride_height) - pad_height;
    // input_ptr and output_ptr point to (in_y, in_x) and (out_y, out_x),
    // respectively. (in_y, in_x) and (out_y, out_x) change along with
    // row_start.
    input_ptr += in_y * params.input_row_size + in_x * params.input_depth;
    output_ptr += out_y * params.output_row_size + out_x * params.output_depth;
    // Shuffling shapes that maximize width over the shuffle workspace size
    // perform better since the inputs are closer together, minimizing
    // shuffling time.
    //
    // If the input shape has width large enough for the 2 row kernels,
    // we prefer to use this. The innermost loop of the kernels handle
    // 2 height x 2 width so this is the fastest path.
    //
    // If the input shape has smaller width but larger height, shuffling is
    // still useful and can benefit from kernels 4 row and 8 row kernels.
    // Handle 8 rows at a time.
    if (params.input_width < four_row_shuffle_params.input_width) {
      for (; out_y <= end_y - 8; out_y += 8) {
        conv_multirow_func(output_multiplier_ptr, output_shift_ptr, input_ptr,
                           out_x, end_x, filter_data, bias_data, output_ptr,
                           params, eight_row_shuffle_params,
                           shuffle_workspace);
        input_ptr += 8 * stride_height * params.input_row_size;
        output_ptr += 8 * params.output_row_size;
      }
    }
    // Handle 4 rows at a time.
    if (params.input_width < two_row_shuffle_params.input_width) {
      for (; out_y <= end_y - 4; out_y += 4) {
        conv_multirow_func(output_multiplier_ptr, output_shift_ptr, input_ptr,
                           out_x, end_x, filter_data, bias_data, output_ptr,
                           params, four_row_shuffle_params, shuffle_workspace);
        input_ptr += 4 * stride_height * params.input_row_size;
        output_ptr += 4 * params.output_row_size;
      }
    }
    // Handle 2 rows at a time.
    for (; out_y <= end_y - 2; out_y += 2) {
      conv_multirow_func(output_multiplier_ptr, output_shift_ptr, input_ptr,
                         out_x, end_x, filter_data, bias_data, output_ptr,
                         params, two_row_shuffle_params, shuffle_workspace);
      input_ptr += 2 * stride_height * params.input_row_size;
      output_ptr += 2 * params.output_row_size;
    }
    // Handle one row at a time.
    for (; out_y < end_y; out_y++) {
      conv_multirow_func(output_multiplier_ptr, output_shift_ptr, input_ptr,
                         out_x, end_x, filter_data, bias_data, output_ptr,
                         params, one_row_shuffle_params, shuffle_workspace);
      input_ptr += stride_height * params.input_row_size;
      output_ptr += params.output_row_size;
    }
  }
}
#endif // __aarch64__
#undef STR
#undef STR_UNEXPANDED
} // namespace depthwise_conv
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_3X3_FILTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_3x3_filter.h | C++ | apache-2.0 | 130,056 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
namespace depthwise_conv {
// Zero-fills the first num_output_pixels * output_depth entries of the
// int32 accumulator buffer before the accumulation loop runs.
inline void DepthwiseConvInitAccBuffer(int num_output_pixels, int output_depth,
                                       int32* acc_buffer) {
  const int num_values = num_output_pixels * output_depth;
  memset(acc_buffer, 0, sizeof(acc_buffer[0]) * num_values);
}
// General (fallback) implementation of hybrid depthwise convolution:
// int8 inputs/filters with per-batch input scales and per-channel filter
// scales, accumulated in int32 and dequantized to float outputs.
// Hybrid (int8 x int8 -> float) depthwise convolution, general path.
//
// Accumulates int8 products into an int32 stack buffer a strip of output
// pixels at a time, then dequantizes: acc * input_scale[b] *
// per_channel_scales[c] + bias, clamped to the float activation range.
// input_offsets[b] is the per-batch quantization zero point of the input.
// [thread_start, thread_end) slices work along thread_dim (0 = batch,
// 1 = output row).
inline void DepthwiseConvHybridGeneral(
    const DepthwiseParams& params,
    const float* input_scales, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data, const float* per_channel_scales,
    const int32_t* input_offsets, int thread_start, int thread_end,
    int thread_dim) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_rows = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  // Fixed-size stack accumulator; output is processed in strips of
  // kOutputPixelsInAccBuffer pixels so output_depth must fit.
  static const int kAccBufferMaxSize = 2048;
  int32 acc_buffer[kAccBufferMaxSize];
  TFLITE_DCHECK_GE(kAccBufferMaxSize, output_depth);
  const int kOutputPixelsInAccBuffer = kAccBufferMaxSize / output_depth;
  const int kAccBufferActualSize = kOutputPixelsInAccBuffer * output_depth;
  TFLITE_DCHECK_LE(kOutputPixelsInAccBuffer * output_depth,
                   kAccBufferActualSize);
  TFLITE_DCHECK_LE(kAccBufferActualSize, kAccBufferMaxSize);
  TFLITE_DCHECK_GE(kOutputPixelsInAccBuffer, 1);
  TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
  // row_accum_func will point to the core accumulation function to be used
  // for this DepthwiseConvHybrid op.
  using row_accum_func_t = decltype(&QuantizedDepthwiseConvAccumRowGeneric);
  row_accum_func_t row_accum_func = nullptr;
#define TFMINI_USE_DEPTHWISECONV_KERNEL(ALLOW_STRIDED, FIXED_INPUT_DEPTH,  \
                                        FIXED_DEPTH_MULTIPLIER)            \
  if (!row_accum_func && (stride_width == 1 || ALLOW_STRIDED) &&           \
      (input_depth == FIXED_INPUT_DEPTH || FIXED_INPUT_DEPTH == 0) &&      \
      depth_multiplier == FIXED_DEPTH_MULTIPLIER) {                        \
    row_accum_func =                                                       \
        QuantizedDepthwiseConvAccumRow<ALLOW_STRIDED, FIXED_INPUT_DEPTH,   \
                                       FIXED_DEPTH_MULTIPLIER>;            \
  }
#ifdef USE_NEON
  // We go over our list of kernels by decreasing order of preference
  // for the cases where multiple kernels could apply.
  // Start with the fastest kernels: AllowStrided=false, fixed input depth.
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 1, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 4, 4)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(false, 12, 1)
  // Next come the strided kernels: AllowStrided=true, fixed input depth.
  // They are a bit less efficient, but allow stride!=1.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 16, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 16)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 20)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 32)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 1, 8)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 8, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 2, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 4, 1)
  // Finally, the kernels allowing a variable input depth,
  // these are the least efficient but most general kernels.
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 1)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 2)
  TFMINI_USE_DEPTHWISECONV_KERNEL(true, 0, 3)
#endif  // USE_NEON
  // No matching fast kernel found, use slow fallback.
  if (!row_accum_func) {
    row_accum_func = QuantizedDepthwiseConvAccumRowGeneric;
  }
#undef TFMINI_USE_DEPTHWISECONV_KERNEL
  const int input_height_stride = input_shape.Dims(3) * input_shape.Dims(2);
  const int input_batch_stride = input_height_stride * input_shape.Dims(1);
  const int filter_height_stride = filter_shape.Dims(3) * filter_shape.Dims(2);
  // Now that we have determined row_accum_func, we can start work.
  int batch_start = 0;
  int batch_end = batches;
  int row_start = 0;
  int row_end = output_rows;
  int output_ptr_offset = 0;
  switch (thread_dim) {
    case 0:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, batches);
      batch_start = thread_start;
      batch_end = thread_end;
      output_ptr_offset = batch_start * FlatSizeSkipDim(output_shape, 0);
      break;
    case 1:
      TFLITE_DCHECK_GE(thread_start, 0);
      TFLITE_DCHECK_LE(thread_end, output_rows);
      row_start = thread_start;
      row_end = thread_end;
      output_ptr_offset = row_start * output_width * output_depth;
      break;
  }
  float* output_ptr = output_data + output_ptr_offset;
  // When slicing by rows, skip the rows owned by other threads when
  // advancing to the next batch (zero when slicing by batch).
  int batch_step =
      (output_rows + row_start - row_end) * output_width * output_depth;
  for (int b = batch_start; b < batch_end; ++b) {
    float input_scale = input_scales[b];
    int32_t input_offset = input_offsets[b];
    for (int out_y = row_start; out_y < row_end; ++out_y) {
      const int in_y_origin = (out_y * stride_height) - pad_height;
      // Clip the filter rows so we never read outside the input image.
      const int filter_y_start =
          std::max(0, (-in_y_origin + dilation_height_factor - 1) /
                          dilation_height_factor);
      const int filter_y_end =
          std::min(filter_height,
                   (input_height - in_y_origin + dilation_height_factor - 1) /
                       dilation_height_factor);
      for (int out_x_buffer_start = 0; out_x_buffer_start < output_width;
           out_x_buffer_start += kOutputPixelsInAccBuffer) {
        const int out_x_buffer_end = std::min(
            output_width, out_x_buffer_start + kOutputPixelsInAccBuffer);
        // We call a 'pixel' a group of activation that share all but the
        // 'depth'/'channel' coordinate. num_output_pixels is the number of
        // output pixels that we will accumulate in this loop iteration.
        const int num_output_pixels = out_x_buffer_end - out_x_buffer_start;
        DepthwiseConvInitAccBuffer(num_output_pixels, output_depth,
                                   acc_buffer);
        // Accumulation loop. Most of the time should be spent in here.
        for (int filter_y = filter_y_start; filter_y < filter_y_end;
             ++filter_y) {
          const int in_y = in_y_origin + dilation_height_factor * filter_y;
          row_accum_func(
              stride_width, dilation_width_factor, input_depth, input_width,
              input_data + in_y * input_height_stride + b * input_batch_stride,
              -input_offset, pad_width, depth_multiplier, filter_width,
              filter_data + filter_y * filter_height_stride, out_x_buffer_start,
              out_x_buffer_end, output_depth, acc_buffer);
        }
        // Finished accumulating int32 values. Just store them as float values
        // The file only includes the ruy profiler, so use its scope label
        // (matches upstream TFLite) instead of gemmlowp's.
        ruy::profiler::ScopeLabel label("store");
        const int num_output_values = output_depth * num_output_pixels;
        int c = 0;
        while (c < output_depth) {
          int target_output_depth = output_depth;
#ifdef USE_NEON
          const float32x4_t output_activation_min_vec =
              vdupq_n_f32(output_activation_min);
          const float32x4_t output_activation_max_vec =
              vdupq_n_f32(output_activation_max);
          const float32x4_t input_scale_32x4 = vdupq_n_f32(input_scale);
          // Vectorized dequantize/clamp over 4 channels at a time. The loop
          // condition already guarantees c + 4 <= output_depth, so no extra
          // bounds check is needed inside.
          for (; c <= output_depth - 4; c += 4) {
            const float32x4_t channel_scale_32x4 =
                vld1q_f32(per_channel_scales + c);
            const float32x4_t bias_32x4 = vld1q_f32(bias_data + c);
            for (int n = 0; n < num_output_pixels; ++n) {
              int loc = n * output_depth + c;
              int32x4_t acc = vld1q_s32(acc_buffer + loc);
              float32x4_t float_acc = vcvtq_f32_s32(acc);
              float_acc = vmulq_f32(float_acc, channel_scale_32x4);
              float_acc = vmulq_f32(float_acc, input_scale_32x4);
              float_acc = vaddq_f32(float_acc, bias_32x4);
              float_acc = vmaxq_f32(float_acc, output_activation_min_vec);
              float_acc = vminq_f32(float_acc, output_activation_max_vec);
              vst1q_f32(output_ptr + loc, float_acc);
            }
          }
#endif  // USE_NEON
          // Scalar tail (and the whole range when NEON is unavailable).
          for (; c < target_output_depth; c++) {
            for (int n = 0; n < num_output_pixels; ++n) {
              int loc = n * output_depth + c;
              int32 acc = acc_buffer[loc];
              float float_acc = acc * input_scale * per_channel_scales[c];
              float_acc += bias_data[c];
              float_acc = std::max(float_acc, output_activation_min);
              float_acc = std::min(float_acc, output_activation_max);
              output_ptr[loc] = float_acc;
            }
          }
        }
        output_ptr += num_output_values;
      }
    }
    output_ptr += batch_step;
  }
}
} // namespace depthwise_conv
template <DepthwiseConvOutputRounding kOutputRounding>
inline void DepthwiseConvHybridWithRounding(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32_t* input_offsets,
int thread_start, int thread_end, int thread_dim) {
gemmlowp::ScopedProfilingLabel label("DepthwiseConvHybridInt8/8bit");
const int depth_multiplier = params.depth_multiplier;
const int dilation_width_factor = params.dilation_width_factor;
const int dilation_height_factor = params.dilation_height_factor;
TFLITE_DCHECK_GE(dilation_width_factor, 1);
TFLITE_DCHECK_GE(dilation_height_factor, 1);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
const int input_depth = input_shape.Dims(3);
TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
const int stride_width = params.stride_width;
const int stride_height = params.stride_height;
const int pad_width = params.padding_values.width;
const int pad_height = params.padding_values.height;
// Call kernel optimized for depthwise convolutions using 3x3 filters if
// parameters are supported.
if (optimized_ops::depthwise_conv::Fast3x3FilterKernelSupported<
optimized_ops::depthwise_conv::QuantizationType::kNonPerChannelUint8>(
input_shape, filter_shape, stride_width, stride_height,
dilation_width_factor, dilation_height_factor, pad_width, pad_height,
depth_multiplier, output_shape, 0, nullptr)) {
gemmlowp::ScopedProfilingLabel specialized_label(
"DepthwiseConvHybridInt8/8bit/3x3");
optimized_ops::depthwise_conv::DepthwiseConvHybrid3x3FilterPerChannel<
DepthwiseConvOutputRounding::kUpward>(
params, input_scales, input_shape, input_data,
filter_shape, filter_data, bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
return;
}
#endif
gemmlowp::ScopedProfilingLabel specialized_label(
"DepthwiseConvHybridInt8/8bit/General");
depthwise_conv::DepthwiseConvHybridGeneral(
params, input_scales, input_shape, input_data,
filter_shape, filter_data, bias_shape, bias_data, output_shape,
output_data, per_channel_scales, input_offsets,
thread_start, thread_end, thread_dim);
}
// Single-thread entry point for one slice of a hybrid depthwise conv.
// Simply forwards to DepthwiseConvHybridWithRounding with the default
// kAwayFromZero output rounding.
inline void DepthwiseConvHybridImpl(
    const DepthwiseParams& params, const float* input_scales,
    const RuntimeShape& input_shape, const int8* input_data,
    const RuntimeShape& filter_shape, const int8* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scales, const int32_t* input_offsets,
    int thread_start, int thread_end, int thread_dim) {
  DepthwiseConvHybridWithRounding<DepthwiseConvOutputRounding::kAwayFromZero>(
      params, input_scales, input_shape, input_data, filter_shape,
      filter_data, bias_shape, bias_data, output_shape, output_data,
      per_channel_scales, input_offsets, thread_start, thread_end,
      thread_dim);
}
// Thread-pool task that evaluates one slice of a hybrid depthwise
// convolution via DepthwiseConvHybridImpl. Shapes and params are held by
// reference, so a task must not outlive the arguments it was built from.
// T is the quantized data type (int8), TS the bias type (float).
template <typename T, typename TS>
struct DepthwiseConvHybridWorkerTask : cpu_backend_threadpool::Task {
  // Captures all arguments; [thread_start, thread_end) is this task's slice
  // along thread_dim (0 = batch, 1 = output row).
  DepthwiseConvHybridWorkerTask(const DepthwiseParams& params,
                                const float* input_scales,
                                const RuntimeShape& input_shape,
                                const T* input_data,
                                const RuntimeShape& filter_shape,
                                const T* filter_data,
                                const RuntimeShape& bias_shape,
                                const TS* bias_data,
                                const RuntimeShape& output_shape,
                                float* output_data,
                                const float* per_channel_scales,
                                const int32_t* input_offsets,
                                int thread_start, int thread_end,
                                int thread_dim)
      : params(params),
        input_scales(input_scales),
        input_shape(input_shape),
        input_data(input_data),
        filter_shape(filter_shape),
        filter_data(filter_data),
        bias_shape(bias_shape),
        bias_data(bias_data),
        output_shape(output_shape),
        output_data(output_data),
        per_channel_scales(per_channel_scales),
        input_offsets(input_offsets),
        thread_start(thread_start),
        thread_end(thread_end),
        thread_dim(thread_dim) {}
  // Invoked by cpu_backend_threadpool: runs the convolution on this slice.
  void Run() override {
    DepthwiseConvHybridImpl(params, input_scales, input_shape,
                            input_data, filter_shape, filter_data,
                            bias_shape, bias_data, output_shape,
                            output_data, per_channel_scales, input_offsets,
                            thread_start, thread_end, thread_dim);
  }
 private:
  const DepthwiseParams& params;
  const float* input_scales;
  const RuntimeShape& input_shape;
  const T* input_data;
  const RuntimeShape& filter_shape;
  const T* filter_data;
  const RuntimeShape& bias_shape;
  const TS* bias_data;
  const RuntimeShape& output_shape;
  float* output_data;
  const float* per_channel_scales;
  const int32_t* input_offsets;
  int thread_start;
  int thread_end;
  int thread_dim;
};
inline void DepthwiseConvHybridPerChannel(
const DepthwiseParams& params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, int32_t* input_offsets,
CpuBackendContext* cpu_backend_context) {
gemmlowp::ScopedProfilingLabel label("DepthwiseConvHybridInt8");
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int output_batches = output_shape.Dims(0);
const int output_rows = output_shape.Dims(1);
int thread_count_batch = HowManyConvThreads(output_shape, filter_shape, 0);
int thread_count_row = HowManyConvThreads(output_shape, filter_shape, 1);
int thread_dim, thread_count, thread_dim_size;
if (thread_count_batch > thread_count_row) {
thread_dim = 0;
thread_dim_size = output_batches;
thread_count = thread_count_batch;
} else {
thread_dim = 1;
thread_dim_size = output_rows;
thread_count = thread_count_row;
}
const int max_threads = cpu_backend_context->max_num_threads();
thread_count = std::max(1, std::min(thread_count, max_threads));
if (thread_count == 1) {
DepthwiseConvHybridImpl(params, input_scales, input_shape,
input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data,
per_channel_scales, input_offsets,
/*thread_start=*/0, /*thread_end=*/output_rows,
/*thread_dim=*/1);
} else {
std::vector<DepthwiseConvHybridWorkerTask<int8, float>> tasks;
// TODO(b/131746020) don't create new heap allocations every time.
// At least we make it a single heap allocation by using reserve().
tasks.reserve(thread_count);
int thread_start = 0;
for (int i = 0; i < thread_count; ++i) {
int thread_end =
thread_start + (thread_dim_size - thread_start) / (thread_count - i);
tasks.emplace_back(params, input_scales, input_shape,
input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data,
per_channel_scales, input_offsets, thread_start,
thread_end, thread_dim);
thread_start = thread_end;
}
cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
cpu_backend_context);
}
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid.h | C++ | apache-2.0 | 20,648 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_3X3_FILTER_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_3X3_FILTER_H_
#include <memory>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_3x3_filter_common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace depthwise_conv {
#define STR(s) STR_UNEXPANDED(s)
#define STR_UNEXPANDED(s) #s
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
#include <stddef.h>
// Represents the number of bytes offset from the start of the
// DepthwiseConvParams struct. This is used in the asm to load parameters.
// Keep these values in sync with the static_asserts below.
// NOTE: the hand-written aarch64 asm kernels in this file address
// DepthwiseConvParams fields by these byte offsets (stringized via the
// STR() macro), so any struct layout change must update both the offsets
// and the asm.
#define OFFSET_INPUT_DEPTH 0
#define OFFSET_INPUT_ROW_SIZE 8
#define OFFSET_OUTPUT_DEPTH 16
#define OFFSET_OUTPUT_ROW_SIZE 24
#define OFFSET_FILTER_ROW_SIZE 32
#define OFFSET_INPUT_OFFSET 40
#define OFFSET_OUTPUT_OFFSET 44
#define OFFSET_OUTPUT_MULTIPLIER 52
#define OFFSET_OUTPUT_ACTIVATION_MIN 56
#define OFFSET_OUTPUT_ACTIVATION_MAX 60
#define OFFSET_OUTPUT_RIGHT_SHIFT 64
#define OFFSET_INPUT_WIDTH 68
#define OFFSET_INPUT_HEIGHT 72
#define OFFSET_STRIDE_WIDTH 76
#define OFFSET_STRIDE_HEIGHT 80
#define OFFSET_OUTPUT_WIDTH 84
#define OFFSET_OUTPUT_HEIGHT 88
#define OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN 92
#define OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX 96
// Compile-time verification that each macro matches the actual field
// offset; a mismatch here means the asm would read the wrong field.
static_assert(offsetof(DepthwiseConvParams, input_depth) == OFFSET_INPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_row_size) ==
                  OFFSET_INPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, output_depth) ==
                  OFFSET_OUTPUT_DEPTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_row_size) ==
                  OFFSET_OUTPUT_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, filter_row_size) ==
                  OFFSET_FILTER_ROW_SIZE,
              "");
static_assert(offsetof(DepthwiseConvParams, input_offset) ==
                  OFFSET_INPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, output_offset) ==
                  OFFSET_OUTPUT_OFFSET,
              "");
static_assert(offsetof(DepthwiseConvParams, output_multiplier) ==
                  OFFSET_OUTPUT_MULTIPLIER,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_min) ==
                  OFFSET_OUTPUT_ACTIVATION_MIN,
              "");
static_assert(offsetof(DepthwiseConvParams, output_activation_max) ==
                  OFFSET_OUTPUT_ACTIVATION_MAX,
              "");
static_assert(offsetof(DepthwiseConvParams, output_right_shift) ==
                  OFFSET_OUTPUT_RIGHT_SHIFT,
              "");
static_assert(offsetof(DepthwiseConvParams, input_width) == OFFSET_INPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, input_height) ==
                  OFFSET_INPUT_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_width) ==
                  OFFSET_STRIDE_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, stride_height) ==
                  OFFSET_STRIDE_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, output_width) ==
                  OFFSET_OUTPUT_WIDTH,
              "");
static_assert(offsetof(DepthwiseConvParams, output_height) ==
                  OFFSET_OUTPUT_HEIGHT,
              "");
static_assert(offsetof(DepthwiseConvParams, float_output_activation_min) ==
                  OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN,
              "");
static_assert(offsetof(DepthwiseConvParams, float_output_activation_max) ==
                  OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX,
              "");
// Primary template for the interior (non-edge) hybrid kernel; only the
// explicit specializations defined below (for specific depth/stride
// combinations) provide an implementation.
template <DepthwiseConvOutputRounding output_rounding, int32 kDepth,
          int32 kStrideWidth, int32 kStrideHeight>
struct DepthwiseConvHybridWindowPerChannel {};
// Primary template for the padded edge/corner handling; specialized per
// EdgeType below.
template <DepthwiseConvOutputRounding output_rounding, EdgeType kEdgeType,
          int kPadWidth, int kPadHeight>
struct DepthwiseConvHybridPartialPerChannel {};
template <>
struct DepthwiseConvHybridWindowPerChannel<DepthwiseConvOutputRounding::kUpward,
8, 1, 1> {
public:
static inline void Run(const float* input_scale,
const int8* input_ptr,
const int8* filter_ptr, const float* bias_ptr,
float* output_ptr, int64_t input_depth,
int64_t input_row_size, int32 output_window_height,
int32 output_window_width,
const float* per_channel_scales,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 2 * input_depth;
const int64_t input_height_increment = 2 * input_row_size;
const int64_t output_height_increment = 2 * 4 * params_ptr->output_row_size;
TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time, load inputs for a 2x1 (2
// height, 1 width) output window (4x3 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 2x1 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time, load inputs for a 1x2 (1
// height, 2 width) output window (3x4 input window).
// Registers v9--v20 hold input values. Mul-add with
// accumulators v21--v24. Then run activation, downquantize
// and store. Repeat for the next 1x2 output window,
// leveraging overlapping inputs.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
//
// This logic is copied and modified from the non-per-channel quantized
// part.
// However, the challenges are how to plan the registers allocation
// wisely: 25 NEON registers are already reserved for inputs, filters,
// and outputs; also, 2 registers (v30, v31) are used for output
// min/max, while another 2 registers (v26, v29) are used for input
// offset & output offset, so that's total 25 + 2 + 2 = 29 already.
// But we need 4 more registers to hold the output multiplier & output
// right shift (we only have 3).
//
// So here's the plan:
// v27 (which held duplicated output multiplier previously) will hold
// the first 4 values of the output_multiplier_ptr (we have 8 in total);
// v30 (which held duplicated output right shift previously) will hold
// the first 4 values of the output_shift_ptr (we have 8 in total);
        // lastly, v28 will hold the last 4 values of output_multiplier and v31
// (previously occupied by activations) will hold the last 4 values of
// output_shift. Then v25 will be used for output activation min while
        // output activation max will just reuse other registers, like v24.
//
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see
// http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"ldr x3, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"cmp %w[output_window_height], #2\n"
"ldr w4, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
"ldr w0, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v25.4s, w4\n"
"dup v29.4s, w0\n"
"ldr x1, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"mov x4, #4\n"
"mul x1, x1, x4\n"
"mul x4, x4, x3\n"
// Load per_channel scales and bias (float).
"ldr w2, [%[input_scale]]\n"
"ld1 {v27.4s, v28.4s}, [%[per_channel_scales]]\n"
"ld1 {v30.4s, v31.4s}, [%[bias_ptr]]\n"
"dup v26.4s, w2\n"
"fmul v27.4s, v27.4s, v26.4s\n"
"fmul v28.4s, v28.4s, v26.4s\n"
"dup v26.8h, w9\n"
// Load filters and add offsets.
"ld1 {v0.8b}, [%[filter_ptr]], x3\n"
"ld1 {v1.8b}, [%[filter_ptr]], x3\n"
"sshll v0.8h, v0.8b, #0\n"
"ld1 {v2.8b}, [%[filter_ptr]], x3\n"
"sshll v1.8h, v1.8b, #0\n"
"ld1 {v3.8b}, [%[filter_ptr]], x3\n"
"sshll v2.8h, v2.8b, #0\n"
"ld1 {v4.8b}, [%[filter_ptr]], x3\n"
"sshll v3.8h, v3.8b, #0\n"
"ld1 {v5.8b}, [%[filter_ptr]], x3\n"
"sshll v4.8h, v4.8b, #0\n"
"ld1 {v6.8b}, [%[filter_ptr]], x3\n"
"sshll v5.8h, v5.8b, #0\n"
"ld1 {v7.8b}, [%[filter_ptr]], x3\n"
"sshll v6.8h, v6.8b, #0\n"
"ld1 {v8.8b}, [%[filter_ptr]], x3\n"
"sshll v7.8h, v7.8b, #0\n"
"sshll v8.8h, v8.8b, #0\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// This loop processes 2x2 outputs. To avoid register exhaustion,
// inputs for the left 2 outputs are loaded first, then the right
// two outputs.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, x11, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 2x1 outputs (2 height,
// 1 width) in anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
// loads, otherwise jump to specific the appropriate label to handle
// smaller widths.
"cmp w5, #2\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"saddw v10.8h, v26.8h, v10.8b\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"saddw v11.8h, v26.8h, v11.8b\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"saddw v13.8h, v26.8h, v13.8b\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
"saddw v14.8h, v26.8h, v14.8b\n"
"movi v21.4s, #0\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"movi v22.4s, #0\n"
"saddw v16.8h, v26.8h, v16.8b\n"
"movi v23.4s, #0\n"
"saddw v17.8h, v26.8h, v17.8b\n"
"movi v24.4s, #0\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"saddw v19.8h, v26.8h, v19.8b\n"
"saddw v20.8h, v26.8h, v20.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"subs w5, w5, #2\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"cmp w5, #3\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x7], x4\n"
// Reset to int
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"movi v21.4s, #0\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"movi v23.4s, #0\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"mov x12, x11\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"add x13, x11, %[input_row_size]\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"add x14, x13, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"ld1 {v15.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"ld1 {v16.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"ld1 {v18.8b}, [x15], %[input_depth]\n"
"ld1 {v19.8b}, [x15], %[input_depth]\n"
"ld1 {v20.8b}, [x15], %[input_depth]\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x7], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"saddw v10.8h, v26.8h, v10.8b\n"
"saddw v11.8h, v26.8h, v11.8b\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"saddw v13.8h, v26.8h, v13.8b\n"
"saddw v14.8h, v26.8h, v14.8b\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"movi v21.4s, #0\n"
"saddw v16.8h, v26.8h, v16.8b\n"
"movi v23.4s, #0\n"
"saddw v17.8h, v26.8h, v17.8b\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"saddw v19.8h, v26.8h, v19.8b\n"
"saddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
// Mul-add left outputs.
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"ld1 {v9.8b}, [x12]\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"ld1 {v12.8b}, [x13]\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"ld1 {v15.8b}, [x14]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"ld1 {v18.8b}, [x15]\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x7], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"movi v21.4s, #0\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"movi v23.4s, #0\n"
// Mul-add right outputs.
"smlal v21.4s, v0.4h, v10.4h\n"
"smlal2 v22.4s, v0.8h, v10.8h\n"
"smlal v23.4s, v0.4h, v13.4h\n"
"smlal2 v24.4s, v0.8h, v13.8h\n"
"smlal v21.4s, v1.4h, v11.4h\n"
"smlal2 v22.4s, v1.8h, v11.8h\n"
"smlal v23.4s, v1.4h, v14.4h\n"
"smlal2 v24.4s, v1.8h, v14.8h\n"
"smlal v21.4s, v2.4h, v9.4h\n"
"smlal2 v22.4s, v2.8h, v9.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"smlal v21.4s, v5.4h, v12.4h\n"
"smlal2 v22.4s, v5.8h, v12.8h\n"
"smlal v23.4s, v5.4h, v15.4h\n"
"smlal2 v24.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v16.4h\n"
"smlal2 v22.4s, v6.8h, v16.8h\n"
"smlal v23.4s, v6.4h, v19.4h\n"
"smlal2 v24.4s, v6.8h, v19.8h\n"
"smlal v21.4s, v7.4h, v17.4h\n"
"smlal2 v22.4s, v7.8h, v17.8h\n"
"smlal v23.4s, v7.4h, v20.4h\n"
"smlal2 v24.4s, v7.8h, v20.8h\n"
"smlal v21.4s, v8.4h, v15.4h\n"
"smlal2 v22.4s, v8.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v18.4h\n"
"smlal2 v24.4s, v8.8h, v18.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x7], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v14.4h\n"
"smlal2 v24.4s, v2.8h, v14.8h\n"
"smlal v21.4s, v3.4h, v12.4h\n"
"smlal2 v22.4s, v3.8h, v12.8h\n"
"smlal v23.4s, v3.4h, v15.4h\n"
"smlal2 v24.4s, v3.8h, v15.8h\n"
"smlal v21.4s, v4.4h, v13.4h\n"
"smlal2 v22.4s, v4.8h, v13.8h\n"
"smlal v23.4s, v4.4h, v16.4h\n"
"smlal2 v24.4s, v4.8h, v16.8h\n"
"smlal v21.4s, v5.4h, v14.4h\n"
"smlal2 v22.4s, v5.8h, v14.8h\n"
"smlal v23.4s, v5.4h, v17.4h\n"
"smlal2 v24.4s, v5.8h, v17.8h\n"
"smlal v21.4s, v6.4h, v15.4h\n"
"smlal2 v22.4s, v6.8h, v15.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v16.4h\n"
"smlal2 v22.4s, v7.8h, v16.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x7], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x12, %[input_ptr]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"add x14, x13, %[input_row_size]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x14, %[input_row_size]\n"
"mov w5, %w[output_window_width]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x1\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w5, #2\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"movi v21.4s, #0\n"
"movi v22.4s, #0\n"
"movi v23.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"saddw v10.8h, v26.8h, v10.8b\n"
"saddw v11.8h, v26.8h, v11.8b\n"
"saddw v13.8h, v26.8h, v13.8b\n"
"saddw v14.8h, v26.8h, v14.8b\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"saddw v17.8h, v26.8h, v17.8b\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"saddw v19.8h, v26.8h, v19.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w5, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
// Load inputs for 3x4 input window which corresponds to a 1x2 output
// window.
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"subs w5, w5, #2\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"cmp w5, #3\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"add %[input_ptr], %[input_ptr], %[input_width_increment]\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"mov x12, %[input_ptr]\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x13, %[input_ptr], %[input_row_size]\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"add x14, x13, %[input_row_size]\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"add x15, x14, %[input_row_size]\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"saddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"ld1 {v17.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"ld1 {v18.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"saddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"ld1 {v19.8b}, [x14], %[input_depth]\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [%[output_ptr]], x4\n"
"st1 {v23.4s, v24.4s}, [%[output_ptr]], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v26.8h, v9.8b\n"
"saddw v10.8h, v26.8h, v10.8b\n"
"saddw v11.8h, v26.8h, v11.8b\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"saddw v13.8h, v26.8h, v13.8b\n"
"saddw v14.8h, v26.8h, v14.8b\n"
"saddw v15.8h, v26.8h, v15.8b\n"
"movi v21.4s, #0\n"
"saddw v16.8h, v26.8h, v16.8b\n"
"movi v23.4s, #0\n"
"saddw v17.8h, v26.8h, v17.8b\n"
"saddw v18.8h, v26.8h, v18.8b\n"
"saddw v19.8h, v26.8h, v19.8b\n"
"saddw v20.8h, v26.8h, v20.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w5, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v0.4h, v10.4h\n"
"ld1 {v20.8b}, [x14], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v10.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v23.4s, v1.4h, v11.4h\n"
"smlal2 v24.4s, v1.8h, v11.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"saddw v12.8h, v26.8h, v12.8b\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v23.4s, v2.4h, v12.4h\n"
"smlal2 v24.4s, v2.8h, v12.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v23.4s, v3.4h, v14.4h\n"
"smlal2 v24.4s, v3.8h, v14.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v23.4s, v4.4h, v15.4h\n"
"smlal2 v24.4s, v4.8h, v15.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"saddw v16.8h, v26.8h, v16.8b\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v23.4s, v5.4h, v16.4h\n"
"smlal2 v24.4s, v5.8h, v16.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v23.4s, v6.4h, v18.4h\n"
"smlal2 v24.4s, v6.8h, v18.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v23.4s, v7.4h, v19.4h\n"
"smlal2 v24.4s, v7.8h, v19.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"saddw v20.8h, v26.8h, v20.8b\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"smlal v23.4s, v8.4h, v20.4h\n"
"smlal2 v24.4s, v8.8h, v20.8h\n"
// Cast to float.
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fmul v23.4s, v23.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v28.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v30.4s\n"
"fadd v24.4s, v24.4s, v31.4s\n"
// Clamp range.
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v25.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v25.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store to float.
"st1 {v21.4s, v22.4s}, [%[output_ptr]], x4\n"
"st1 {v23.4s, v24.4s}, [%[output_ptr]], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"smlal v21.4s, v3.4h, v13.4h\n"
"smlal2 v22.4s, v3.8h, v13.8h\n"
"smlal v21.4s, v4.4h, v14.4h\n"
"smlal2 v22.4s, v4.8h, v14.8h\n"
"smlal v21.4s, v5.4h, v15.4h\n"
"smlal2 v22.4s, v5.8h, v15.8h\n"
"smlal v21.4s, v6.4h, v17.4h\n"
"smlal2 v22.4s, v6.8h, v17.8h\n"
"smlal v21.4s, v7.4h, v18.4h\n"
"smlal2 v22.4s, v7.8h, v18.8h\n"
"smlal v21.4s, v8.4h, v19.4h\n"
"smlal2 v22.4s, v8.8h, v19.8h\n"
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"fmul v21.4s, v21.4s, v27.4s\n"
"fmul v22.4s, v22.4s, v28.4s\n"
"fadd v21.4s, v21.4s, v30.4s\n"
"fadd v22.4s, v22.4s, v31.4s\n"
"fmax v21.4s, v21.4s, v25.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v25.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"st1 {v21.4s, v22.4s}, [%[output_ptr]]\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height),
[per_channel_scales] "+r"(per_channel_scales)
:
// Inputs.
[input_scale] "r"(input_scale),
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x1", "x2", "x3", "x4", "x5", "x6", "x7",
"x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
template <>
struct DepthwiseConvHybridWindowPerChannel<DepthwiseConvOutputRounding::kUpward,
8, 2, 2> {
static inline void Run(const float* input_scale, const int8* input_ptr,
const int8* filter_ptr, const float* bias_ptr,
float* output_ptr, int64_t input_depth,
int64_t input_row_size, int32 output_window_height,
int32 output_window_width,
const float* per_channel_scales,
const DepthwiseConvParams* params_ptr) {
const int64_t input_width_increment = 4 * input_depth;
const int64_t input_height_increment = 4 * input_row_size;
const int64_t output_height_increment = 2 * 4 * params_ptr->output_row_size;
TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "1"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "2"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "3"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "4"
#define DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "5"
#define DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "6"
#define DEPTHWISECONV_LABEL_HEIGHT_1 "7"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "8"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "9"
#define DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "10"
#define DEPTHWISECONV_LABEL_HEIGHT_1_END "11"
asm volatile(
// Performs depthwise convolutions for a window specified by
// |output_window_height| and |output_window_width|. The inner-most loop
// processes 2x2 outputs, and any leftovers at the end.
//
// Algorithm works as follows:
//
// 1. Load filters of 8 depth (8x3x3). Registers v0--v8 hold filter
// values.
// 2. For 2 output heights at a time:
// i. For 2 output widths at a time at stride 2, a 5x5 input
// window is required. To avoid register exhaustion, we load
// the first 2 rows of the 5x5 input window into registers
// v9--v18, and use the same registers to load the next 2
// rows, and finally v9--v13 to load the last row.
// Accumulators for all 2x2 outputs are reserved by registers
// v21-v22 (top left output), v23-v24 (top right output),
// v19-v20 (bottom left output), v25-v26 (bottom right
// output).
// ii. Handle single leftover width if exists.
// 3. Handle single leftover height if exists.
// i. For 2 output widths at a time at stride 2, load inputs for
// a 1x2 (1 height, 2 width) output window (3x5 input
// window). Registers v9--v24 hold input values. Mul-add with
// accumulators v24--v27.
// ii. Handle single leftover width if exists.
//
// Loads are placed as soon as the register is no longer needed and
// interleaved with arithmetic operations to take advantage of
// dual-issue pipelines. We also add input offsets as far from the loads
// as possible to give loads enough cycles to fetch data from memory.
//
// This logic is copied and modified from the non-per-channel quantized
// part.
// The register planning here is really tricky:
// v0-v29 are all used at least once for either filter/input/output,
        // some of them are used for output shift and output multiplier, or
// input/output offset.
// Only v30 & v31 are only used for output activation min/max.
// For per-channel case, we need 4 registers to hold output shift &
// output multiplier. However, given the reality, we simply cannot do
// that without reloading.
//
// So here's the plan:
// We hold output_multiplier in v30 & v31, and we will load output_shift
// into two consecutive registers each time before use.
// We will duplicate output min & max before needed.
// Sometimes we may borrow registers from input offset or bias, we will
// dup them back after use.
//
// Set "constant" registers. These registers may be replaced with temp
// values from time to time when there are not enough NEON registers.
// We use x9--x15 general purpose registers as they are caller-saved
// temporary registers (see http://infocenter.arm.com/help/topic/com.arm.doc.ihi0055b/IHI0055B_aapcs64.pdf). // NOLINT
"ldr w0, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"cmp %w[output_window_height], #2\n"
"ldr x5, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"ldr x19, [%[params_ptr], #" STR(OFFSET_OUTPUT_ROW_SIZE) "]\n"
"mov x4, #4\n"
"mul x19, x19, x4\n"
"mul x4, x4, x5\n"
"ldr w2, [%[input_scale]]\n"
"dup v28.4s, w2\n"
"ldr w3, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
"ldr w2, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v29.4s, w2\n"
"ld1 {v30.4s, v31.4s}, [%[per_channel_scales]]\n"
"fmul v30.4s, v30.4s, v28.4s\n"
"fmul v31.4s, v31.4s, v28.4s\n"
"dup v28.8h, w0\n"
// Load filters and add offsets.
"ld1 {v0.8b}, [%[filter_ptr]], x5\n"
"ld1 {v1.8b}, [%[filter_ptr]], x5\n"
"sshll v0.8h, v0.8b, #0\n"
"ld1 {v2.8b}, [%[filter_ptr]], x5\n"
"sshll v1.8h, v1.8b, #0\n"
"ld1 {v3.8b}, [%[filter_ptr]], x5\n"
"sshll v2.8h, v2.8b, #0\n"
"ld1 {v4.8b}, [%[filter_ptr]], x5\n"
"sshll v3.8h, v3.8b, #0\n"
"ld1 {v5.8b}, [%[filter_ptr]], x5\n"
"sshll v4.8h, v4.8b, #0\n"
"ld1 {v6.8b}, [%[filter_ptr]], x5\n"
"sshll v5.8h, v5.8b, #0\n"
"ld1 {v7.8b}, [%[filter_ptr]], x5\n"
"sshll v6.8h, v6.8b, #0\n"
"ld1 {v8.8b}, [%[filter_ptr]]\n"
"sshll v7.8h, v7.8b, #0\n"
"sshll v8.8h, v8.8b, #0\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_LOOP ":\n"
// Load the first two rows of the 5x5 input window, then reuse the
// same registers to load subsequent rows as they become available.
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
// The height 2 / width 2 loop loads an extra 1 output horizontally in
// anticipation for the next iteration. Make sure
// |output_window_width| is large enough to handle the additional
// load, otherwise jump to the appropriate label to handle smaller
// widths.
"cmp w14, #2\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"add x7, %[output_ptr], x19\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"movi v21.4s, #0\n"
"movi v22.4s, #0\n"
"movi v23.4s, #0\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"movi v24.4s, #0\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"movi v19.4s, #0\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"movi v20.4s, #0\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"movi v25.4s, #0\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"movi v26.4s, #0\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"subs w14, w14, #2\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"cmp w14, #3\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"mov x12, x11\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"add x15, x13, %[input_row_size]\n"
// Cast to float.
"ld1 {v27.4s, v28.4s}, [%[bias_ptr]]\n"
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v30.4s\n"
"fmul v22.4s, v22.4s, v31.4s\n"
"fmul v23.4s, v23.4s, v30.4s\n"
"fmul v24.4s, v24.4s, v31.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v27.4s\n"
"fadd v22.4s, v22.4s, v28.4s\n"
"fadd v23.4s, v23.4s, v27.4s\n"
"fadd v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"dup v27.4s, w3\n"
"fmax v21.4s, v21.4s, v27.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v27.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x6], x4\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"movi v21.4s, #0\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"movi v23.4s, #0\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x13], %[input_depth]\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
// Cast to float.
"ld1 {v27.4s, v28.4s}, [%[bias_ptr]]\n"
"scvtf v19.4s, v19.4s\n"
"scvtf v20.4s, v20.4s\n"
"scvtf v25.4s, v25.4s\n"
"scvtf v26.4s, v26.4s\n"
// Multiply by per channel scale.
"fmul v19.4s, v19.4s, v30.4s\n"
"fmul v20.4s, v20.4s, v31.4s\n"
"fmul v25.4s, v25.4s, v30.4s\n"
"fmul v26.4s, v26.4s, v31.4s\n"
// Add bias.
"fadd v19.4s, v19.4s, v27.4s\n"
"fadd v20.4s, v20.4s, v28.4s\n"
"fadd v25.4s, v25.4s, v27.4s\n"
"fadd v26.4s, v26.4s, v28.4s\n"
"dup v27.4s, w3\n"
"fmax v19.4s, v19.4s, v27.4s\n"
"fmin v19.4s, v19.4s, v29.4s\n"
"fmax v20.4s, v20.4s, v27.4s\n"
"fmin v20.4s, v20.4s, v29.4s\n"
"fmax v25.4s, v25.4s, v27.4s\n"
"fmin v25.4s, v25.4s, v29.4s\n"
"fmax v26.4s, v26.4s, v27.4s\n"
"fmin v26.4s, v26.4s, v29.4s\n"
"dup v28.8h, w0\n"
// Store.
"st1 {v19.4s, v20.4s}, [x7], x4\n"
"st1 {v25.4s, v26.4s}, [x7], x4\n"
"fcvtms v19.4s, v19.4s\n"
"fcvtms v20.4s, v20.4s\n"
"fcvtms v25.4s, v25.4s\n"
"fcvtms v26.4s, v26.4s\n"
"movi v20.4s, #0\n"
"movi v26.4s, #0\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"movi v19.4s, #0\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"movi v25.4s, #0\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER "f\n"
// Handle last 2 columns if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER ":\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v12.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v13.8b}, [x12]\n"
"add x12, x15, %[input_row_size]\n"
"smlal v23.4s, v0.4h, v11.4h\n"
"ld1 {v17.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v0.8h, v11.8h\n"
"ld1 {v18.8b}, [x13]\n"
"add x13, x12, %[input_row_size]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v3.4h, v16.4h\n"
"smlal2 v24.4s, v3.8h, v16.8h\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"ld1 {v16.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v1.4h, v12.4h\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v1.8h, v12.8h\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v23.4s, v2.4h, v13.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v24.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x15]\n"
"smlal v23.4s, v4.4h, v17.4h\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"smlal2 v24.4s, v4.8h, v17.8h\n"
"ld1 {v17.8b}, [x12], %[input_depth]\n"
"smlal v23.4s, v5.4h, v18.4h\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"smlal2 v24.4s, v5.8h, v18.8h\n"
"ld1 {v18.8b}, [x12]\n"
"smlal v21.4s, v6.4h, v9.4h\n"
"smlal2 v22.4s, v6.8h, v9.8h\n"
"smlal v19.4s, v0.4h, v9.4h\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v20.4s, v0.8h, v9.8h\n"
"ld1 {v9.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v6.4h, v11.4h\n"
"smlal2 v24.4s, v6.8h, v11.8h\n"
"smlal v21.4s, v7.4h, v10.4h\n"
"smlal2 v22.4s, v7.8h, v10.8h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal v19.4s, v1.4h, v10.4h\n"
"smlal2 v20.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v7.4h, v12.4h\n"
"smlal2 v24.4s, v7.8h, v12.8h\n"
"smlal v25.4s, v1.4h, v12.4h\n"
"smlal2 v26.4s, v1.8h, v12.8h\n"
"smlal v21.4s, v8.4h, v11.4h\n"
"smlal2 v22.4s, v8.8h, v11.8h\n"
"smlal v19.4s, v2.4h, v11.4h\n"
"smlal2 v20.4s, v2.8h, v11.8h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal v25.4s, v0.4h, v11.4h\n"
"smlal2 v26.4s, v0.8h, v11.8h\n"
"ld1 {v11.8b}, [x13], %[input_depth]\n"
"smlal v23.4s, v8.4h, v13.4h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal2 v24.4s, v8.8h, v13.8h\n"
"smlal v25.4s, v2.4h, v13.4h\n"
"smlal2 v26.4s, v2.8h, v13.8h\n"
"ld1 {v13.8b}, [x13]\n"
"ld1 {v27.4s, v28.4s}, [%[bias_ptr]]\n"
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
// Multiply by per channel scale.
"fmul v21.4s, v21.4s, v30.4s\n"
"fmul v22.4s, v22.4s, v31.4s\n"
"fmul v23.4s, v23.4s, v30.4s\n"
"fmul v24.4s, v24.4s, v31.4s\n"
// Add bias.
"fadd v21.4s, v21.4s, v27.4s\n"
"fadd v22.4s, v22.4s, v28.4s\n"
"fadd v23.4s, v23.4s, v27.4s\n"
"fadd v24.4s, v24.4s, v28.4s\n"
"dup v28.8h, w0\n"
"dup v27.4s, w3\n"
"fmax v21.4s, v21.4s, v27.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v27.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"fmax v23.4s, v23.4s, v27.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v27.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
// Store.
"st1 {v21.4s, v22.4s}, [x6], x4\n"
"st1 {v23.4s, v24.4s}, [x6]\n"
// Reset to int.
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
"movi v22.4s, #0\n"
"movi v24.4s, #0\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"smlal v19.4s, v6.4h, v9.4h\n"
"smlal2 v20.4s, v6.8h, v9.8h\n"
"smlal v25.4s, v6.4h, v11.4h\n"
"smlal2 v26.4s, v6.8h, v11.8h\n"
"smlal v19.4s, v7.4h, v10.4h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v20.4s, v7.8h, v10.8h\n"
"smlal v25.4s, v7.4h, v12.4h\n"
"smlal2 v26.4s, v7.8h, v12.8h\n"
"smlal v19.4s, v8.4h, v11.4h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"smlal2 v20.4s, v8.8h, v11.8h\n"
"smlal v25.4s, v8.4h, v13.4h\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"smlal2 v26.4s, v8.8h, v13.8h\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"smlal v19.4s, v3.4h, v14.4h\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v20.4s, v3.8h, v14.8h\n"
"smlal v25.4s, v3.4h, v16.4h\n"
"smlal2 v26.4s, v3.8h, v16.8h\n"
"smlal v19.4s, v4.4h, v15.4h\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v20.4s, v4.8h, v15.8h\n"
"smlal v25.4s, v4.4h, v17.4h\n"
"smlal2 v26.4s, v4.8h, v17.8h\n"
"smlal v19.4s, v5.4h, v16.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v20.4s, v5.8h, v16.8h\n"
"smlal v25.4s, v5.4h, v18.4h\n"
"smlal2 v26.4s, v5.8h, v18.8h\n"
// Cast to float.
"ld1 {v27.4s, v28.4s}, [%[bias_ptr]]\n"
"scvtf v19.4s, v19.4s\n"
"scvtf v20.4s, v20.4s\n"
"scvtf v25.4s, v25.4s\n"
"scvtf v26.4s, v26.4s\n"
// Multiply by per channel scale.
"fmul v19.4s, v19.4s, v30.4s\n"
"fmul v20.4s, v20.4s, v31.4s\n"
"fmul v25.4s, v25.4s, v30.4s\n"
"fmul v26.4s, v26.4s, v31.4s\n"
// Add bias.
"fadd v19.4s, v19.4s, v27.4s\n"
"fadd v20.4s, v20.4s, v28.4s\n"
"fadd v25.4s, v25.4s, v27.4s\n"
"fadd v26.4s, v26.4s, v28.4s\n"
"dup v28.8h, w0\n"
"dup v27.4s, w3\n"
"fmax v19.4s, v19.4s, v27.4s\n"
"fmin v19.4s, v19.4s, v29.4s\n"
"fmax v20.4s, v20.4s, v27.4s\n"
"fmin v20.4s, v20.4s, v29.4s\n"
"fmax v25.4s, v25.4s, v27.4s\n"
"fmin v25.4s, v25.4s, v29.4s\n"
"fmax v26.4s, v26.4s, v27.4s\n"
"fmin v26.4s, v26.4s, v29.4s\n"
"dup v28.8h, w0\n"
// Store.
"st1 {v19.4s, v20.4s}, [x7], x4\n"
"st1 {v25.4s, v26.4s}, [x7]\n"
"fcvtms v19.4s, v19.4s\n"
"fcvtms v20.4s, v20.4s\n"
"fcvtms v25.4s, v25.4s\n"
"fcvtms v26.4s, v26.4s\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP "f\n"
// Handle last column if exists.
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER ":\n"
// Registers v9, v10, v11, v14, v15, and v16 have already been loaded
// with the correct values at this point. This corresponds to the
// first two input rows of the top left output. Now load the last
// input row for this output. Once these inputs are no longer needed,
// load the input rows for the bottom left output.
"add x12, x15, %[input_row_size]\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v12.8b}, [x15], %[input_depth]\n"
"smlal v21.4s, v0.4h, v9.4h\n"
"ld1 {v13.8b}, [x15], %[input_depth]\n"
"smlal2 v22.4s, v0.8h, v9.8h\n"
"ld1 {v17.8b}, [x15]\n"
"smlal v21.4s, v1.4h, v10.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v22.4s, v1.8h, v10.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v21.4s, v2.4h, v11.4h\n"
"smlal2 v22.4s, v2.8h, v11.8h\n"
"ld1 {v11.8b}, [x12]\n"
"smlal v21.4s, v3.4h, v14.4h\n"
"smlal2 v22.4s, v3.8h, v14.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v4.4h, v15.4h\n"
"smlal2 v22.4s, v4.8h, v15.8h\n"
"ld1 {v15.8b}, [x13], %[input_depth]\n"
"smlal v21.4s, v5.4h, v16.4h\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"smlal2 v22.4s, v5.8h, v16.8h\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"ld1 {v16.8b}, [x13]\n"
"smlal v21.4s, v6.4h, v12.4h\n"
"smlal2 v22.4s, v6.8h, v12.8h\n"
"smlal v23.4s, v0.4h, v12.4h\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"smlal2 v24.4s, v0.8h, v12.8h\n"
"smlal v21.4s, v7.4h, v13.4h\n"
"smlal2 v22.4s, v7.8h, v13.8h\n"
"smlal v23.4s, v1.4h, v13.4h\n"
"smlal2 v24.4s, v1.8h, v13.8h\n"
"smlal v21.4s, v8.4h, v17.4h\n"
"smlal2 v22.4s, v8.8h, v17.8h\n"
"smlal v23.4s, v2.4h, v17.4h\n"
"smlal2 v24.4s, v2.8h, v17.8h\n"
"ld1 {v26.4s, v27.4s}, [%[bias_ptr]]\n"
"scvtf v21.4s, v21.4s\n"
"scvtf v22.4s, v22.4s\n"
"fmul v21.4s, v21.4s, v30.4s\n"
"fmul v22.4s, v22.4s, v31.4s\n"
"fadd v21.4s, v21.4s, v26.4s\n"
"fadd v22.4s, v22.4s, v27.4s\n"
"dup v26.4s, w3\n"
"fmax v21.4s, v21.4s, v26.4s\n"
"fmin v21.4s, v21.4s, v29.4s\n"
"fmax v22.4s, v22.4s, v26.4s\n"
"fmin v22.4s, v22.4s, v29.4s\n"
"st1 {v21.4s, v22.4s}, [x6]\n"
"fcvtms v21.4s, v21.4s\n"
"fcvtms v22.4s, v22.4s\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"smlal v23.4s, v3.4h, v9.4h\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"smlal2 v24.4s, v3.8h, v9.8h\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"smlal v23.4s, v4.4h, v10.4h\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"smlal2 v24.4s, v4.8h, v10.8h\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"smlal v23.4s, v5.4h, v11.4h\n"
"smlal2 v24.4s, v5.8h, v11.8h\n"
"smlal v23.4s, v6.4h, v14.4h\n"
"smlal2 v24.4s, v6.8h, v14.8h\n"
"smlal v23.4s, v7.4h, v15.4h\n"
"smlal2 v24.4s, v7.8h, v15.8h\n"
"smlal v23.4s, v8.4h, v16.4h\n"
"smlal2 v24.4s, v8.8h, v16.8h\n"
"ld1 {v26.4s, v27.4s}, [%[bias_ptr]]\n"
"scvtf v23.4s, v23.4s\n"
"scvtf v24.4s, v24.4s\n"
"fmul v23.4s, v23.4s, v30.4s\n"
"fmul v24.4s, v24.4s, v31.4s\n"
"fadd v23.4s, v23.4s, v26.4s\n"
"fadd v24.4s, v24.4s, v27.4s\n"
"dup v26.4s, w3\n"
"fmax v23.4s, v23.4s, v26.4s\n"
"fmin v23.4s, v23.4s, v29.4s\n"
"fmax v24.4s, v24.4s, v26.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
"st1 {v23.4s, v24.4s}, [x7]\n"
"fcvtms v23.4s, v23.4s\n"
"fcvtms v24.4s, v24.4s\n"
DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP ":\n"
"subs %w[output_window_height], %w[output_window_height], #2\n"
"add %[input_ptr], %[input_ptr], %[input_height_increment]\n"
"cmp %w[output_window_height], #2\n"
"add %[output_ptr], %[output_ptr], %[output_height_increment]\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_2_LOOP "b\n"
DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP ":\n"
"cmp %w[output_window_height], #1\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
DEPTHWISECONV_LABEL_HEIGHT_1 ":\n"
"mov x11, %[input_ptr]\n"
"mov x12, x11\n"
"add x13, x12, %[input_row_size]\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"add x15, x13, %[input_row_size]\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"mov x6, %[output_ptr]\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"mov w14, %w[output_window_width]\n"
// The height 1 / width 2 loop loads an extra 1x1 output in anticipation
// for the next iteration. Make sure |output_window_width| is large
// enough to handle the additional load, otherwise jump to the
// appropriate label to handle smaller widths.
"cmp w14, #2\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"movi v24.4s, #0\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"movi v25.4s, #0\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"movi v26.4s, #0\n"
"movi v27.4s, #0\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER "f\n"
"cmp w14, #1\n"
"beq " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
//"loop_%=:\n"
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"subs w14, w14, #2\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"cmp w14, #3\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"add x11, x11, %[input_width_increment]\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"mov x12, x11\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"add x13, x12, %[input_row_size]\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"add x15, x13, %[input_row_size]\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"ld1 {v9.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"ld1 {v10.8b}, [x12], %[input_depth]\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"ld1 {v11.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"ld1 {v12.8b}, [x13], %[input_depth]\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"ld1 {v13.8b}, [x13], %[input_depth]\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"ld1 {v14.8b}, [x13], %[input_depth]\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"ld1 {v15.8b}, [x15], %[input_depth]\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"ld1 {v16.8b}, [x15], %[input_depth]\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"ld1 {v17.8b}, [x15], %[input_depth]\n"
"saddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"saddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"saddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"saddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"saddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"ld1 {v28.4s, v29.4s}, [%[bias_ptr]]\n"
"scvtf v24.4s, v24.4s\n"
"scvtf v25.4s, v25.4s\n"
"scvtf v26.4s, v26.4s\n"
"scvtf v27.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v30.4s\n"
"fmul v25.4s, v25.4s, v31.4s\n"
"fmul v26.4s, v26.4s, v30.4s\n"
"fmul v27.4s, v27.4s, v31.4s\n"
"fadd v24.4s, v24.4s, v28.4s\n"
"fadd v25.4s, v25.4s, v29.4s\n"
"fadd v26.4s, v26.4s, v28.4s\n"
"fadd v27.4s, v27.4s, v29.4s\n"
"dup v28.4s, w3\n"
"dup v29.4s, w2\n"
"fmax v24.4s, v24.4s, v28.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
"fmax v25.4s, v25.4s, v28.4s\n"
"fmin v25.4s, v25.4s, v29.4s\n"
"fmax v26.4s, v26.4s, v28.4s\n"
"fmin v26.4s, v26.4s, v29.4s\n"
"fmax v27.4s, v27.4s, v28.4s\n"
"fmin v27.4s, v27.4s, v29.4s\n"
"dup v28.8h, w0\n"
"st1 {v24.4s, v25.4s}, [x6], x4\n"
"st1 {v26.4s, v27.4s}, [x6], x4\n"
"fcvtms v24.4s, v24.4s\n"
"fcvtms v25.4s, v25.4s\n"
"fcvtms v26.4s, v26.4s\n"
"fcvtms v27.4s, v27.4s\n"
"movi v25.4s, #0\n"
"saddw v9.8h, v28.8h, v9.8b\n"
"movi v27.4s, #0\n"
"saddw v10.8h, v28.8h, v10.8b\n"
"saddw v11.8h, v28.8h, v11.8b\n"
"saddw v12.8h, v28.8h, v12.8b\n"
"saddw v13.8h, v28.8h, v13.8b\n"
"saddw v14.8h, v28.8h, v14.8b\n"
"movi v24.4s, #0\n"
"saddw v15.8h, v28.8h, v15.8b\n"
"movi v26.4s, #0\n"
"saddw v16.8h, v28.8h, v16.8b\n"
"saddw v17.8h, v28.8h, v17.8b\n"
"bge " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP "b\n"
// At this point, there will be one of 2 width or 1 width leftover,
// not both.
"cmp w14, #2\n"
"blt " DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER "f\n"
// Handle last two horizontal outputs if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER ":\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"ld1 {v18.8b}, [x12], %[input_depth]\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"ld1 {v19.8b}, [x12]\n"
"smlal v26.4s, v0.4h, v11.4h\n"
"ld1 {v20.8b}, [x13], %[input_depth]\n"
"smlal2 v27.4s, v0.8h, v11.8h\n"
"ld1 {v21.8b}, [x13]\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"ld1 {v22.8b}, [x15], %[input_depth]\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"ld1 {v23.8b}, [x15]\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v26.4s, v3.4h, v14.4h\n"
"smlal2 v27.4s, v3.8h, v14.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v26.4s, v6.4h, v17.4h\n"
"smlal2 v27.4s, v6.8h, v17.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"saddw v18.8h, v28.8h, v18.8b\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"saddw v19.8h, v28.8h, v19.8b\n"
"smlal v26.4s, v1.4h, v18.4h\n"
"saddw v20.8h, v28.8h, v20.8b\n"
"smlal2 v27.4s, v1.8h, v18.8h\n"
"smlal v26.4s, v2.4h, v19.4h\n"
"saddw v21.8h, v28.8h, v21.8b\n"
"smlal2 v27.4s, v2.8h, v19.8h\n"
"smlal v26.4s, v4.4h, v20.4h\n"
"smlal v26.4s, v5.4h, v21.4h\n"
"smlal2 v27.4s, v4.8h, v20.8h\n"
"saddw v22.8h, v28.8h, v22.8b\n"
"smlal2 v27.4s, v5.8h, v21.8h\n"
"saddw v23.8h, v28.8h, v23.8b\n"
"smlal v26.4s, v7.4h, v22.4h\n"
"smlal2 v27.4s, v7.8h, v22.8h\n"
"smlal v26.4s, v8.4h, v23.4h\n"
"smlal2 v27.4s, v8.8h, v23.8h\n"
"ld1 {v28.4s, v29.4s}, [%[bias_ptr]]\n"
"scvtf v24.4s, v24.4s\n"
"scvtf v25.4s, v25.4s\n"
"scvtf v26.4s, v26.4s\n"
"scvtf v27.4s, v27.4s\n"
"fmul v24.4s, v24.4s, v30.4s\n"
"fmul v25.4s, v25.4s, v31.4s\n"
"fmul v26.4s, v26.4s, v30.4s\n"
"fmul v27.4s, v27.4s, v31.4s\n"
"fadd v24.4s, v24.4s, v28.4s\n"
"fadd v25.4s, v25.4s, v29.4s\n"
"fadd v26.4s, v26.4s, v28.4s\n"
"fadd v27.4s, v27.4s, v29.4s\n"
"dup v28.4s, w3\n"
"dup v29.4s, w2\n"
"fmax v24.4s, v24.4s, v28.4s\n"
"fmin v24.4s, v24.4s, v29.4s\n"
"fmax v25.4s, v25.4s, v28.4s\n"
"fmin v25.4s, v25.4s, v29.4s\n"
"fmax v26.4s, v26.4s, v28.4s\n"
"fmin v26.4s, v26.4s, v29.4s\n"
"fmax v27.4s, v27.4s, v28.4s\n"
"fmin v27.4s, v27.4s, v29.4s\n"
"dup v28.8h, w0\n"
"st1 {v24.4s, v25.4s}, [x6], x4\n"
"st1 {v26.4s, v27.4s}, [x6]\n"
"fcvtms v24.4s, v24.4s\n"
"fcvtms v25.4s, v25.4s\n"
"fcvtms v26.4s, v26.4s\n"
"fcvtms v27.4s, v27.4s\n"
"b " DEPTHWISECONV_LABEL_HEIGHT_1_END "f\n"
// Handle bottom right output if exists.
DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER ":\n"
"dup v29.8h, w2\n"
"smlal v24.4s, v0.4h, v9.4h\n"
"smlal2 v25.4s, v0.8h, v9.8h\n"
"smlal v24.4s, v1.4h, v10.4h\n"
"smlal2 v25.4s, v1.8h, v10.8h\n"
"smlal v24.4s, v2.4h, v11.4h\n"
"smlal2 v25.4s, v2.8h, v11.8h\n"
"smlal v24.4s, v3.4h, v12.4h\n"
"smlal2 v25.4s, v3.8h, v12.8h\n"
"smlal v24.4s, v4.4h, v13.4h\n"
"smlal2 v25.4s, v4.8h, v13.8h\n"
"smlal v24.4s, v5.4h, v14.4h\n"
"smlal2 v25.4s, v5.8h, v14.8h\n"
"smlal v24.4s, v6.4h, v15.4h\n"
"smlal2 v25.4s, v6.8h, v15.8h\n"
"smlal v24.4s, v7.4h, v16.4h\n"
"smlal2 v25.4s, v7.8h, v16.8h\n"
"smlal v24.4s, v8.4h, v17.4h\n"
"smlal2 v25.4s, v8.8h, v17.8h\n"
"ld1 {v26.4s, v27.4s}, [%[bias_ptr]]\n"
"scvtf v24.4s, v24.4s\n"
"scvtf v25.4s, v25.4s\n"
"fmul v24.4s, v24.4s, v30.4s\n"
"fmul v25.4s, v25.4s, v31.4s\n"
"fadd v24.4s, v24.4s, v26.4s\n"
"fadd v25.4s, v25.4s, v27.4s\n"
"dup v26.4s, w3\n"
"dup v27.4s, w2\n"
"fmax v24.4s, v24.4s, v26.4s\n"
"fmin v24.4s, v24.4s, v27.4s\n"
"fmax v25.4s, v25.4s, v26.4s\n"
"fmin v25.4s, v25.4s, v27.4s\n"
"st1 {v24.4s, v25.4s}, [x6]\n"
"fcvtms v24.4s, v24.4s\n"
"fcvtms v25.4s, v25.4s\n"
DEPTHWISECONV_LABEL_HEIGHT_1_END ":\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr),
[output_window_height] "+r"(output_window_height)
:
// Inputs.
[input_scale] "r"(input_scale),
[bias_ptr] "r"(bias_ptr), [input_row_size] "r"(input_row_size),
[input_depth] "r"(input_depth),
[output_window_width] "r"(output_window_width),
[input_width_increment] "r"(input_width_increment),
[input_height_increment] "r"(input_height_increment),
[output_height_increment] "r"(output_height_increment),
[per_channel_scales] "r"(per_channel_scales),
[params_ptr] "r"(params_ptr)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
"v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
"v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29",
"v30", "v31",
// We use these general-purpose registers.
"x0", "x2", "x3", "x4", "x5", "x6", "x7",
"x10", "x11", "x12", "x13", "x14", "x15",
"x19", "x20");
#undef DEPTHWISECONV_LABEL_HEIGHT_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_2_WIDTH_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_2_AFTER_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LOOP
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_1_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_WIDTH_2_LEFTOVER
#undef DEPTHWISECONV_LABEL_HEIGHT_1_END
}
};
// Hybrid (int8 input/filter, float output) depthwise convolution for a 1x1
// "center" edge window with depth multiplier 1, using per-output-channel
// quantization scales.
template <>
struct DepthwiseConvHybridPartialPerChannel<
DepthwiseConvOutputRounding::kUpward, EdgeType::kCenter, 1, 1> {
// Runs the convolution across the full output depth, 8 channels per
// iteration of the inner loop.
//
// |input_scale|        - scalar quantization scale of the input tensor;
//                        multiplied into each per-channel scale below.
// |input_ptr|          - int8 input values for the 1x1 window.
// |filter_ptr|         - int8 filter values for the 1x1 window.
// |bias_ptr|           - float bias, one value per output channel.
// |output_ptr|         - float output, written 8 channels (32 bytes) at a
//                        time.
// |per_channel_scales| - per-output-channel filter scales.
// |params_ptr|         - packed convolution parameters; fields are read via
//                        the OFFSET_* byte-offset macros.
static inline void Run(const float* input_scale, const int8* input_ptr,
const int8* filter_ptr, const float* bias_ptr,
float* output_ptr, const float* per_channel_scales,
const DepthwiseConvParams* params_ptr) {
// The asm widens filter bytes with a plain sshll (no offset added), so this
// path is only valid when the filter zero point is 0.
TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
asm volatile(
// Performs depthwise convolutions for an input window of size 1x1 and
// padding of 1 across the full depth. Expects |input_ptr| and
// |filter_ptr| to be pointing to the 1x1 input and filter values.
//
// Register roles: v6-v7 hold per_channel_scale * input_scale for the
// current 8 channels, v10-v11 hold the corresponding bias values.
//
// Setup: w9 temporarily holds the input offset (replicated into v26 for
// widening the int8 inputs), then the input scale (replicated into v28),
// then the float activation min; w10 holds the activation max
// (replicated into v30/v31). x11 counts remaining output depth.
"ld1 {v8.8b}, [%[input_ptr]], #8\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
"ldr x11, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
"ld1 {v0.8b}, [%[filter_ptr]], #8\n"
"dup v26.8h, w9\n"
"ldr w9, [%[input_scale]]\n"
"cmp x11, #16\n"
"dup v28.4s, w9\n"
"ldr w9, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
"ldr w10, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
"dup v30.4s, w9\n"
"dup v31.4s, w10\n"
// Zero the int32 accumulators, widen input (adding the input offset via
// saddw) and filter (sshll, zero offset) to 16 bits, and pre-scale the
// first 8 per-channel multipliers by the input scale.
"movi v16.4s, #0\n"
"saddw v8.8h, v26.8h, v8.8b\n"
"movi v17.4s, #0\n"
"sshll v0.8h, v0.8b, #0\n"
"ld1 {v6.4s}, [%[per_channel_scales]], #16\n"
"fmul v6.4s, v6.4s, v28.4s\n"
"ld1 {v10.4s}, [%[bias_ptr]], #16\n"
"ld1 {v7.4s}, [%[per_channel_scales]], #16\n"
"fmul v7.4s, v7.4s, v28.4s\n"
"ld1 {v11.4s}, [%[bias_ptr]], #16\n"
// If fewer than 16 channels remain, the single tail block below handles
// the final (<= 8-channel) group.
"blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
//"loop_%=:\n"
// Main loop: 8 channels per iteration. Accumulate int32 products
// (smlal/smlal2), convert to float, scale, add bias, clamp to the
// activation range, and store; loads for the next iteration are
// interleaved with the arithmetic.
DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
"smlal v16.4s, v0.4h, v8.4h\n"
"subs x11, x11, #8\n"
"smlal2 v17.4s, v0.8h, v8.8h\n"
"ld1 {v8.8b}, [%[input_ptr]], #8\n"
"cmp x11, #16\n"
"ld1 {v0.8b}, [%[filter_ptr]], #8\n"
"scvtf v16.4s, v16.4s\n"
"scvtf v17.4s, v17.4s\n"
"fmul v16.4s, v16.4s, v6.4s\n"
"fmul v17.4s, v17.4s, v7.4s\n"
"fadd v16.4s, v16.4s, v10.4s\n"
"fadd v17.4s, v17.4s, v11.4s\n"
"fmax v16.4s, v16.4s, v30.4s\n"
"fmin v16.4s, v16.4s, v31.4s\n"
"fmax v17.4s, v17.4s, v30.4s\n"
"fmin v17.4s, v17.4s, v31.4s\n"
"st1 {v16.4s, v17.4s}, [%[output_ptr]], #32\n"
// NOTE(review): the fcvtms results are overwritten by the movi
// re-zeroing just below and are never stored — they appear vestigial
// (carried over from the quantized-output variant); confirm before
// relying on them.
"fcvtms v16.4s, v16.4s\n"
"fcvtms v17.4s, v17.4s\n"
// Prepare the next 8 channels: widen input/filter, reset accumulators,
// and load + pre-scale the next per-channel scales and biases.
"saddw v8.8h, v26.8h, v8.8b\n"
"movi v16.4s, #0\n"
"sshll v0.8h, v0.8b, #0\n"
"movi v17.4s, #0\n"
"ld1 {v6.4s}, [%[per_channel_scales]], #16\n"
"ld1 {v10.4s}, [%[bias_ptr]], #16\n"
"ld1 {v7.4s}, [%[per_channel_scales]], #16\n"
"ld1 {v11.4s}, [%[bias_ptr]], #16\n"
"fmul v6.4s, v6.4s, v28.4s\n"
"fmul v7.4s, v7.4s, v28.4s\n"
"bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
// Tail: process the last group of 8 channels (no further loads).
DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
"smlal v16.4s, v0.4h, v8.4h\n"
"smlal2 v17.4s, v0.8h, v8.8h\n"
"scvtf v16.4s, v16.4s\n"
"scvtf v17.4s, v17.4s\n"
"fmul v16.4s, v16.4s, v6.4s\n"
"fmul v17.4s, v17.4s, v7.4s\n"
"fadd v16.4s, v16.4s, v10.4s\n"
"fadd v17.4s, v17.4s, v11.4s\n"
"fmax v16.4s, v16.4s, v30.4s\n"
"fmin v16.4s, v16.4s, v31.4s\n"
"fmax v17.4s, v17.4s, v30.4s\n"
"fmin v17.4s, v17.4s, v31.4s\n"
"st1 {v16.4s, v17.4s}, [%[output_ptr]]\n"
// NOTE(review): results unused — see the note in the loop above.
"fcvtms v16.4s, v16.4s\n"
"fcvtms v17.4s, v17.4s\n"
:
// Outputs.
[filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
[output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
[per_channel_scales] "+r"(per_channel_scales)
:
// Inputs.
[params_ptr] "r"(params_ptr), [input_scale] "r"(input_scale)
:
// Clobbers.
"cc", "memory",
// We use these NEON registers.
"v0", "v6", "v7", "v8", "v10", "v11", "v16", "v17", "v18", "v19",
"v26", "v28", "v30", "v31",
// We use these general-purpose registers.
"x9", "x10", "x11");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
}
};
// Hybrid (int8 input/filter -> float output) per-channel depthwise conv,
// stride 1: corner-edge kernel. With pad of 1, a corner output point only
// sees a 2x2 in-bounds sub-window of the 3x3 filter; the caller passes
// |filter_ptr| already offset to that sub-window. Depth is processed 8
// channels at a time.
template <>
struct DepthwiseConvHybridPartialPerChannel<
    DepthwiseConvOutputRounding::kUpward, EdgeType::kCorner, 1, 1> {
  static inline void Run(const float* input_scale, const int8* input_ptr,
                         const int8* filter_ptr, const float* bias_ptr,
                         float* output_ptr, const float* per_channel_scales,
                         const DepthwiseConvParams* params_ptr) {
    // The asm below assumes the filter values carry no zero-point offset.
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x2 input and
        // filter values.
        //
        // Register plan (hybrid variant): v4-v5 hold the float bias and
        // v6-v7 hold the per-channel scales pre-multiplied by the input
        // scale. v8-v11 are the four input taps and v0-v3 the four filter
        // taps, both widened to 16-bit lanes before accumulation.
        // Load input and filter values. x15 = output depth (loop counter);
        // x12-x14 walk the input rows, x9-x11 the filter rows.
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ldr x9, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "cmp x15, #16\n"
        "add x12, %[input_ptr], x15\n"
        "add x13, %[input_ptr], x9\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "add x14, x13, x15\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "ldr x6, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x9, %[filter_ptr], x15\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "add x10, %[filter_ptr], x6\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "add x11, x10, x15\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Load constants: v26 = input offset (s16 lanes), v28 = input scale,
        // v30/v31 = float activation min/max.
        "ldr w6, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w6\n"
        "ldr w6, [%[input_scale]]\n"
        "dup v28.4s, w6\n"
        "ldr w6, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w7, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.4s, w6\n"
        "dup v31.4s, w7\n"
        // Load bias (v4-v5) and per-channel scales (v6-v7); fold the input
        // scale into the per-channel scales up front so the loop applies a
        // single multiply per vector.
        "ld1 {v4.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v6.4s}, [%[per_channel_scales]], #16\n"
        "ld1 {v5.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v7.4s}, [%[per_channel_scales]], #16\n"
        "fmul v6.4s, v6.4s, v28.4s\n"
        "fmul v7.4s, v7.4s, v28.4s\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "movi v16.4s, #0\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "movi v17.4s, #0\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Accumulate the 2x2 window into int32 (v16 low 4 channels, v17 high
        // 4), interleaved with loads for the next 8-channel chunk.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "subs x15, x15, #8\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [%[input_ptr]], #8\n"
        "cmp x15, #16\n"
        "ld1 {v0.8b}, [%[filter_ptr]], #8\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], #8\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "ld1 {v1.8b}, [x9], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], #8\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v2.8b}, [x10], #8\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x14], #8\n"
        "ld1 {v3.8b}, [x11], #8\n"
        // Convert accumulators to float, apply per-channel scale, add bias,
        // clamp to the activation range, and store 8 float outputs.
        "scvtf v16.4s, v16.4s\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v16.4s, v16.4s, v6.4s\n"
        "fmul v17.4s, v17.4s, v7.4s\n"
        "fadd v16.4s, v16.4s, v4.4s\n"
        "fadd v17.4s, v17.4s, v5.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]], #32\n"
        // NOTE(review): the fcvtms results appear unused (v16/v17 are
        // re-zeroed below before the next accumulation) — presumably
        // vestigial from the fixed-point kernel; confirm before removing.
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        // Re-apply offsets/widening to the freshly loaded chunk and reload
        // bias/scales for the next 8 channels.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "movi v16.4s, #0\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "movi v17.4s, #0\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "ld1 {v4.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v6.4s}, [%[per_channel_scales]], #16\n"
        "ld1 {v5.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v7.4s}, [%[per_channel_scales]], #16\n"
        "fmul v6.4s, v6.4s, v28.4s\n"
        "fmul v7.4s, v7.4s, v28.4s\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (possibly only) 8-channel chunk: same accumulate/scale/bias/
        // clamp sequence, but no pointer post-increment on the store.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "scvtf v16.4s, v16.4s\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v16.4s, v16.4s, v6.4s\n"
        "fmul v17.4s, v17.4s, v7.4s\n"
        "fadd v16.4s, v16.4s, v4.4s\n"
        "fadd v17.4s, v17.4s, v5.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]]\n"
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [per_channel_scales] "+r"(per_channel_scales)
        :
        // Inputs.
        [input_scale] "r"(input_scale),
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        // NOTE(review): v18/v19 are listed but not referenced above —
        // over-declaring a clobber is harmless, likely copied from a
        // sibling kernel.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v16", "v17","v18", "v19", "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Hybrid per-channel depthwise conv, stride 1: horizontal-edge kernel.
// Handles output points along the (padded) top or bottom row, where only a
// 2x3 sub-window of the 3x3 filter is in bounds. The caller passes
// |filter_ptr| already offset to that sub-window. Depth is processed 8
// channels at a time; row pointers are re-derived from the advancing base
// pointers on every depth iteration.
template <>
struct DepthwiseConvHybridPartialPerChannel<
    DepthwiseConvOutputRounding::kUpward, EdgeType::kHorizontal, 1, 1> {
  static inline void Run(const float* input_scale, const int8* input_ptr,
                         const int8* filter_ptr, const float* bias_ptr,
                         float* output_ptr, const float* per_channel_scales,
                         const DepthwiseConvParams* params_ptr) {
    // The asm below assumes the filter values carry no zero-point offset.
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 2x3 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 2x3 input and
        // filter values.
        //
        // Register plan (hybrid variant): v6-v7 hold the float bias and
        // v14-v15 hold the per-channel scales pre-multiplied by the input
        // scale. v8-v13 are the six input taps and v0-v5 the six filter
        // taps, widened to 16-bit lanes.
        // Load input and filter values. x7 = input depth (column stride),
        // x15 = output depth (loop counter); x12/x13 index the two input
        // rows, x9/x10 the two filter rows.
        "ldr x7, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x9, %[filter_ptr]\n"
        "ldr x14, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "add x10, x9, x14\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "cmp x15, #16\n"
        "ld1 {v10.8b}, [x12]\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "ld1 {v13.8b}, [x13]\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "ld1 {v2.8b}, [x9]\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants: v26 = input offset (s16 lanes), v28 = input scale,
        // v30/v31 = float activation min/max.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[input_scale]]\n"
        "dup v28.4s, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.4s, w12\n"
        "dup v31.4s, w13\n"
        // Load bias (v6-v7) and per-channel scales (v14-v15); fold the input
        // scale into the per-channel scales up front.
        "ld1 {v6.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v14.4s}, [%[per_channel_scales]], #16\n"
        "fmul v14.4s, v14.4s, v28.4s\n"
        "ld1 {v7.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v15.4s}, [%[per_channel_scales]], #16\n"
        "fmul v15.4s, v15.4s, v28.4s\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "movi v16.4s, #0\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "movi v17.4s, #0\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Advance the base pointers by 8 channels and rebuild the row
        // pointers, then accumulate the 2x3 window into v16/v17 while
        // prefetching the next chunk's inputs and filters.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x9, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x7\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x10, x9, x14\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "ld1 {v9.8b}, [x12], x7\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x12]\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x9], x7\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13], x7\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x9], x7\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x13], x7\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9]\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x13]\n"
        // Convert to float, apply per-channel scale, add bias, clamp, store.
        "scvtf v16.4s, v16.4s\n"
        "fmul v16.4s, v16.4s, v14.4s\n"
        "ld1 {v3.8b}, [x10], x7\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v17.4s, v17.4s, v15.4s\n"
        "ld1 {v4.8b}, [x10], x7\n"
        "fadd v16.4s, v16.4s, v6.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "fadd v17.4s, v17.4s, v7.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "saddw v8.8h, v26.8h, v8.8b\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]], #32\n"
        // NOTE(review): fcvtms results appear unused (v16/v17 are re-zeroed
        // below) — presumably vestigial from the fixed-point kernel; confirm.
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        // Re-apply offsets/widening and reload bias/scales for the next
        // 8-channel chunk.
        "saddw v9.8h, v26.8h, v9.8b\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "movi v16.4s, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "movi v17.4s, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v6.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v14.4s}, [%[per_channel_scales]], #16\n"
        "ld1 {v7.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v15.4s}, [%[per_channel_scales]], #16\n"
        "fmul v14.4s, v14.4s, v28.4s\n"
        "fmul v15.4s, v15.4s, v28.4s\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (possibly only) 8-channel chunk; store without post-increment.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "scvtf v16.4s, v16.4s\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v16.4s, v16.4s, v14.4s\n"
        "fmul v17.4s, v17.4s, v15.4s\n"
        "fadd v16.4s, v16.4s, v6.4s\n"
        "fadd v17.4s, v17.4s, v7.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]]\n"
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr),
        [per_channel_scales] "+r"(per_channel_scales),
        [bias_ptr] "+r"(bias_ptr)
        :
        // Inputs.
        [input_scale] "r"(input_scale), [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        // NOTE(review): v18/v19 are listed but not referenced above;
        // over-declaring a clobber is harmless.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
        "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
// Hybrid per-channel depthwise conv, stride 1: vertical-edge kernel.
// Handles output points along the (padded) left or right column, where only
// a 3x2 sub-window of the 3x3 filter is in bounds. The caller passes
// |filter_ptr| already offset to that sub-window. Depth is processed 8
// channels at a time; row pointers are re-derived from the advancing base
// pointers on every depth iteration.
template <>
struct DepthwiseConvHybridPartialPerChannel<
    DepthwiseConvOutputRounding::kUpward, EdgeType::kVertical, 1, 1> {
  static inline void Run(const float* input_scale, const int8* input_ptr,
                         const int8* filter_ptr, const float* bias_ptr,
                         float* output_ptr, const float* per_channel_scales,
                         const DepthwiseConvParams* params_ptr) {
    // The asm below assumes the filter values carry no zero-point offset.
    TFLITE_DCHECK_EQ(params_ptr->filter_offset, 0);
#define DEPTHWISECONV_LABEL_DEPTH_8_LOOP "1"
#define DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "2"
    asm volatile(
        // Performs depthwise convolutions for an input window of size 3x2 and
        // padding of 1 across the full depth. Expects |input_ptr| and
        // |filter_ptr| to be pointing to the beginning of the 3x2 input and
        // filter values.
        //
        // Register plan (hybrid variant): v6-v7 hold the float bias and
        // v14-v15 hold the per-channel scales pre-multiplied by the input
        // scale. v8-v13 are the six input taps and v0-v5 the six filter
        // taps, widened to 16-bit lanes.
        // Load input and filter values. x6 = input depth (column stride),
        // x5 = filter row size, x15 = output depth (loop counter);
        // x12/x13/x14 index the three input rows, x7/x9/x10 the filter rows.
        "ldr x6, [%[params_ptr], #" STR(OFFSET_INPUT_DEPTH) "]\n"
        "mov x12, %[input_ptr]\n"
        "ldr x11, [%[params_ptr], #" STR(OFFSET_INPUT_ROW_SIZE) "]\n"
        "mov x7, %[filter_ptr]\n"
        "ldr x5, [%[params_ptr], #" STR(OFFSET_FILTER_ROW_SIZE) "]\n"
        "add x13, x12, x11\n"
        "ldr x15, [%[params_ptr], #" STR(OFFSET_OUTPUT_DEPTH) "]\n"
        "add x14, x13, x11\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "add x9, x7, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "cmp x15, #16\n"
        "add x10, x9, x5\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "ld1 {v11.8b}, [x13]\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "ld1 {v13.8b}, [x14]\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "ld1 {v1.8b}, [x7]\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "ld1 {v3.8b}, [x9]\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "ld1 {v5.8b}, [x10]\n"
        // Load constants: v26 = input offset (s16 lanes), v28 = input scale,
        // v30/v31 = float activation min/max.
        "ldr w12, [%[params_ptr], #" STR(OFFSET_INPUT_OFFSET) "]\n"
        "dup v26.8h, w12\n"
        "ldr w12, [%[input_scale]]\n"
        "dup v28.4s, w12\n"
        "ldr w12, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MIN) "]\n"
        "ldr w13, [%[params_ptr], #" STR(OFFSET_FLOAT_OUTPUT_ACTIVATION_MAX) "]\n"
        "dup v30.4s, w12\n"
        "dup v31.4s, w13\n"
        // Load bias (v6-v7) and per-channel scales (v14-v15); fold the input
        // scale into the per-channel scales up front.
        "ld1 {v6.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v14.4s}, [%[per_channel_scales]], #16\n"
        "ld1 {v7.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v15.4s}, [%[per_channel_scales]], #16\n"
        "fmul v14.4s, v14.4s, v28.4s\n"
        "fmul v15.4s, v15.4s, v28.4s\n"
        // Add input and filter offsets.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "movi v16.4s, #0\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "movi v17.4s, #0\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "blt " DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP "f\n"
        //"loop_%=:\n"
        DEPTHWISECONV_LABEL_DEPTH_8_LOOP ":\n"
        // Advance the base pointers by 8 channels, rebuild the row pointers,
        // and accumulate the 3x2 window into v16/v17 while loading the next
        // chunk's inputs and filters.
        "mov x12, %[input_ptr]\n"
        "subs x15, x15, #8\n"
        "add x13, x12, x11\n"
        "cmp x15, #16\n"
        "add x14, x13, x11\n"
        "add %[input_ptr], %[input_ptr], #8\n"
        "smlal v16.4s, v0.4h, v8.4h\n"
        "mov x7, %[filter_ptr]\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "ld1 {v8.8b}, [x12], x6\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "add x9, x7, x5\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "add x10, x9, x5\n"
        "ld1 {v9.8b}, [x12]\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "add %[filter_ptr], %[filter_ptr], #8\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "ld1 {v10.8b}, [x13], x6\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "ld1 {v0.8b}, [x7], x6\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "ld1 {v11.8b}, [x13]\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "ld1 {v1.8b}, [x7]\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "ld1 {v12.8b}, [x14], x6\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "ld1 {v2.8b}, [x9], x6\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "ld1 {v13.8b}, [x14]\n"
        // Convert to float, apply per-channel scale, add bias, clamp, store.
        "scvtf v16.4s, v16.4s\n"
        "fmul v16.4s, v16.4s, v14.4s\n"
        "ld1 {v3.8b}, [x9]\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v17.4s, v17.4s, v15.4s\n"
        "ld1 {v4.8b}, [x10], x6\n"
        "fadd v16.4s, v16.4s, v6.4s\n"
        "ld1 {v5.8b}, [x10]\n"
        "fadd v17.4s, v17.4s, v7.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]], #32\n"
        // NOTE(review): fcvtms results appear unused (v16/v17 are re-zeroed
        // below) — presumably vestigial from the fixed-point kernel; confirm.
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        // Re-apply offsets/widening and reload bias/scales for the next
        // 8-channel chunk.
        "saddw v8.8h, v26.8h, v8.8b\n"
        "saddw v9.8h, v26.8h, v9.8b\n"
        "saddw v10.8h, v26.8h, v10.8b\n"
        "saddw v11.8h, v26.8h, v11.8b\n"
        "saddw v12.8h, v26.8h, v12.8b\n"
        "saddw v13.8h, v26.8h, v13.8b\n"
        "sshll v0.8h, v0.8b, #0\n"
        "sshll v1.8h, v1.8b, #0\n"
        "sshll v2.8h, v2.8b, #0\n"
        "movi v16.4s, #0\n"
        "sshll v3.8h, v3.8b, #0\n"
        "movi v17.4s, #0\n"
        "sshll v4.8h, v4.8b, #0\n"
        "sshll v5.8h, v5.8b, #0\n"
        "ld1 {v6.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v14.4s}, [%[per_channel_scales]], #16\n"
        "ld1 {v7.4s}, [%[bias_ptr]], #16\n"
        "ld1 {v15.4s}, [%[per_channel_scales]], #16\n"
        "fmul v14.4s, v14.4s, v28.4s\n"
        "fmul v15.4s, v15.4s, v28.4s\n"
        "bge " DEPTHWISECONV_LABEL_DEPTH_8_LOOP "b\n"
        DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP ":\n"
        // Final (possibly only) 8-channel chunk; store without post-increment.
        "smlal v16.4s, v0.4h, v8.4h\n"
        "smlal2 v17.4s, v0.8h, v8.8h\n"
        "smlal v16.4s, v1.4h, v9.4h\n"
        "smlal2 v17.4s, v1.8h, v9.8h\n"
        "smlal v16.4s, v2.4h, v10.4h\n"
        "smlal2 v17.4s, v2.8h, v10.8h\n"
        "smlal v16.4s, v3.4h, v11.4h\n"
        "smlal2 v17.4s, v3.8h, v11.8h\n"
        "smlal v16.4s, v4.4h, v12.4h\n"
        "smlal2 v17.4s, v4.8h, v12.8h\n"
        "smlal v16.4s, v5.4h, v13.4h\n"
        "smlal2 v17.4s, v5.8h, v13.8h\n"
        "scvtf v16.4s, v16.4s\n"
        "scvtf v17.4s, v17.4s\n"
        "fmul v16.4s, v16.4s, v14.4s\n"
        "fmul v17.4s, v17.4s, v15.4s\n"
        "fadd v16.4s, v16.4s, v6.4s\n"
        "fadd v17.4s, v17.4s, v7.4s\n"
        "fmax v16.4s, v16.4s, v30.4s\n"
        "fmin v16.4s, v16.4s, v31.4s\n"
        "fmax v17.4s, v17.4s, v30.4s\n"
        "fmin v17.4s, v17.4s, v31.4s\n"
        "st1 {v16.4s, v17.4s}, [%[output_ptr]]\n"
        "fcvtms v16.4s, v16.4s\n"
        "fcvtms v17.4s, v17.4s\n"
        :
        // Outputs.
        [filter_ptr] "+r"(filter_ptr), [input_ptr] "+r"(input_ptr),
        [output_ptr] "+r"(output_ptr), [bias_ptr] "+r"(bias_ptr),
        [per_channel_scales] "+r"(per_channel_scales)
        :
        // Inputs.
        [input_scale] "r"(input_scale),
        [params_ptr] "r"(params_ptr)
        :
        // Clobbers.
        "cc", "memory",
        // We use these NEON registers.
        // NOTE(review): v18/v19 are listed but not referenced above;
        // over-declaring a clobber is harmless.
        "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10",
        "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19",
        "v26", "v28", "v30", "v31",
        // We use these general-purpose registers.
        "x5", "x6", "x7", "x9", "x10", "x11", "x12", "x13", "x14", "x15");
#undef DEPTHWISECONV_LABEL_DEPTH_8_LOOP
#undef DEPTHWISECONV_LABEL_DEPTH_8_AFTER_LOOP
  }
};
#undef OFFSET_INPUT_DEPTH
#undef OFFSET_INPUT_ROW_SIZE
#undef OFFSET_OUTPUT_DEPTH
#undef OFFSET_OUTPUT_ROW_SIZE
#undef OFFSET_INPUT_OFFSET
#undef OFFSET_OUTPUT_OFFSET
#undef OFFSET_OUTPUT_MULTIPLIER
#undef OFFSET_OUTPUT_ACTIVATION_MIN
#undef OFFSET_OUTPUT_ACTIVATION_MAX
#undef OFFSET_OUTPUT_RIGHT_SHIFT
#undef OFFSET_INPUT_WIDTH
#undef OFFSET_INPUT_HEIGHT
#undef OFFSET_OUTPUT_WIDTH
#undef OFFSET_OUTPUT_HEIGHT
#undef OFFSET_OUTPUT_FLOAT_ACTIVATION_MIN
#undef OFFSET_OUTPUT_FLOAT_ACTIVATION_MAX
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvHybridThroughDepthPerChannel {
  // Sweeps the 8-channel DepthwiseConvHybridWindowPerChannel kernel across
  // the depth dimension, covering channels [start_depth, end_depth) in
  // steps of 8 (any remainder < 8 is left for the caller). Kept out of line
  // (noinline) to maintain a small binary size; read-only parameters travel
  // in a DepthwiseConvParams struct to minimize call overhead.
  static void __attribute__((noinline))
  Run(const float* input_scale, const int8* input_ptr, const int8* filter_ptr,
      const float* bias_ptr, float* output_ptr, int64_t start_depth,
      int64_t end_depth, int64_t input_depth, int64_t input_row_size,
      int32 output_window_height, int32 output_window_width,
      const float* per_channel_scales, const DepthwiseConvParams& params) {
    constexpr int64_t kDepthStep = 8;
    while (start_depth + kDepthStep <= end_depth) {
      DepthwiseConvHybridWindowPerChannel<output_rounding, 8, kStrideWidth,
                                          kStrideHeight>::Run(
          input_scale, input_ptr, filter_ptr, bias_ptr, output_ptr,
          input_depth, input_row_size, output_window_height,
          output_window_width, per_channel_scales, &params);
      // Advance every per-channel stream to the next 8-channel slice.
      input_ptr += kDepthStep;
      output_ptr += kDepthStep;
      filter_ptr += kDepthStep;
      bias_ptr += kDepthStep;
      per_channel_scales += kDepthStep;
      start_depth += kDepthStep;
    }
  }
};
template <DepthwiseConvOutputRounding output_rounding, int32 kStrideWidth,
          int32 kStrideHeight>
struct DepthwiseConvHybridMultiRowPerChannel {
  using ConvKernel =
      DepthwiseConvHybridThroughDepthPerChannel<output_rounding, kStrideWidth,
                                                kStrideHeight>;
  // Computes a horizontal strip of output rows, |shuffle_params.output_height|
  // rows tall, from |start_x| to |end_x|. For sufficiently deep/wide inputs
  // the input is first prefetched and shuffled into |shuffle_workspace|
  // (64 channels at a time) so the depth-sweep kernel reads contiguous,
  // cache-friendly data.
  static inline void Run(const float* input_scale, const int8* input_data,
                         int32 start_x, int32 end_x, const int8* filter_data,
                         const float* bias_data, float* output_data,
                         const float* per_channel_scales,
                         const DepthwiseConvParams& params,
                         const ShuffleParams& shuffle_params,
                         int8* shuffle_workspace) {
    TFLITE_DCHECK(
        shuffle_params.input_height ==
        get_shuffle_input_size(kStrideHeight, shuffle_params.output_height))
    TFLITE_DCHECK(
        shuffle_params.input_width ==
        get_shuffle_input_size(kStrideWidth, shuffle_params.output_width));
    TFLITE_DCHECK_LE(
        64 * shuffle_params.input_width * shuffle_params.input_height,
        kDepthwiseConvScratchWorkspaceSize);
    int32 out_x = start_x;
    // Run shuffling on inputs with sufficiently large depth and width. When
    // these parameters are large enough, more time is taken to load inputs
    // from memory. At this point, it becomes useful to prefetch and
    // preshuffle the input data to maximize locality.
    // Note: simplified from the original
    // `output_depth > 64 || (output_depth <= 64 && input_width > 150)` —
    // the `output_depth <= 64` term was redundant. Thresholds are empirical.
    if (params.output_depth > 64 || params.input_width > 150) {
      for (; out_x <= (end_x - shuffle_params.output_width);
           out_x += shuffle_params.output_width) {
        const int8* input_ptr = input_data;
        const float* bias_ptr = bias_data;
        const int8* filter_ptr = filter_data;
        const float* per_channel_scales_ptr = per_channel_scales;
        float* output_ptr = output_data;
        int64_t depth = 0;
        const int64_t shuffle_row_size = 64 * shuffle_params.input_width;
        // Process the depth dimension in 64-channel slabs that fit the
        // shuffle workspace.
        for (; depth <= params.output_depth - 64; depth += 64) {
          // Preload the strip into L1 before shuffling.
          const int8* h_ptr = input_ptr;
          for (int32 i = 0; i < shuffle_params.input_height; i++) {
            const int8* ptr = h_ptr;
            for (int32 j = 0; j < shuffle_params.input_width; j++) {
              optimized_ops_preload_l1_keep(ptr);
              ptr += params.input_depth;
            }
            h_ptr += params.input_row_size;
          }
          // For a large enough input, shuffle into buckets.
          ShuffleInput(input_ptr, params.input_depth, params.input_width,
                       params.input_height, 64, shuffle_params.input_width,
                       shuffle_params.input_height, shuffle_workspace);
          ConvKernel::Run(input_scale,
                          shuffle_workspace, filter_ptr, bias_ptr, output_ptr,
                          0, 64, 64, shuffle_row_size,
                          shuffle_params.output_height,
                          shuffle_params.output_width, per_channel_scales_ptr,
                          params);
          input_ptr += 64;
          output_ptr += 64;
          filter_ptr += 64;
          bias_ptr += 64;
          per_channel_scales_ptr += 64;
        }
        // Preload (leftover depth reads the unshuffled input directly).
        const int8* h_ptr = input_ptr;
        for (int32 i = 0; i < shuffle_params.input_height; i++) {
          const int8* ptr = h_ptr;
          for (int32 j = 0; j < shuffle_params.input_width; j++) {
            optimized_ops_preload_l1_keep(ptr);
            ptr += params.input_depth;
          }
          h_ptr += params.input_row_size;
        }
        // Handle leftover depth.
        ConvKernel::Run(input_scale, input_ptr,
                        filter_ptr, bias_ptr, output_ptr, depth,
                        params.output_depth, params.input_depth,
                        params.input_row_size, shuffle_params.output_height,
                        shuffle_params.output_width, per_channel_scales_ptr,
                        params);
        input_data +=
            shuffle_params.output_width * kStrideWidth * params.input_depth;
        output_data += shuffle_params.output_width * params.output_depth;
      }
    }
    // Handle the remaining output columns (or the whole strip when the
    // shuffle path was skipped) without shuffling.
    const int32 output_leftover_width = end_x - out_x;
    if (output_leftover_width > 0) {
      ConvKernel::Run(input_scale, input_data, filter_data,
                      bias_data, output_data, 0, params.output_depth,
                      params.input_depth, params.input_row_size,
                      shuffle_params.output_height, output_leftover_width,
                      per_channel_scales, params);
    }
  }
};
// Processes the borders of the input for pad_width and pad_height = 1.
// Calls 4 asm kernels:
// * 1x1 input shape.
// * Corner edges.
// * Horizontal edges.
// * Vertical edges.
// For each edge case, |filter_ptr| is offset into |filter_data| so it points
// at the first filter tap that overlaps in-bounds input:
// filter_row_size skips one filter row, output_depth skips one filter column
// (filter_row_size == output_depth * filter_width).
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConvHybridHandlePaddingPerChannel(
    const float* input_scale, const int8* input_data,
    const int8* filter_data, const float* bias_data, float* output_data,
    const float* per_channel_scales, const DepthwiseConvParams& params) {
  // Degenerate 1x1 input: only the center tap filter[1][1] overlaps it.
  if (params.input_width == 1 && params.input_height == 1) {
    const int8* filter_ptr =
        filter_data + params.filter_row_size + params.output_depth;
    DepthwiseConvHybridPartialPerChannel<output_rounding, EdgeType::kCenter, 1,
                                         1>::Run(input_scale, input_data,
                                                 filter_ptr, bias_data, output_data,
                                                 per_channel_scales, &params);
    return;
  }
  const int32 out_x_start_corner = 0;
  const int32 out_x_end_corner = params.output_width - 1;
  const int32 out_y_start_corner = 0;
  const int32 out_y_end_corner = params.output_height - 1;
  // Handle top row.
  const int8* input_ptr = input_data;
  // Top-left corner: start at filter[1][1] (lower-right 2x2 of the filter).
  const int8* filter_ptr =
      filter_data + params.filter_row_size + params.output_depth;
  float* output_ptr = output_data;
  DepthwiseConvHybridPartialPerChannel<
      output_rounding, EdgeType::kCorner, 1, 1>::Run(
          input_scale, input_ptr, filter_ptr, bias_data,
          output_ptr, per_channel_scales, &params);
  // With stride 2 the first interior output starts one input column later.
  input_ptr += (params.stride_width - 1) * params.input_depth;
  // Top edge: bottom two filter rows, starting at filter[1][0].
  filter_ptr = filter_data + params.filter_row_size;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvHybridPartialPerChannel<
        output_rounding, EdgeType::kHorizontal, 1, 1>::Run(
            input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
            per_channel_scales, &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Top-right corner reuses the same filter offset as the top edge here
  // (lower-left 2x2 sub-window alignment).
  DepthwiseConvHybridPartialPerChannel<
      output_rounding, EdgeType::kCorner, 1, 1>::Run(
          input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
          per_channel_scales, &params);
  // Handle left side: right two filter columns, starting at filter[0][1].
  input_ptr = input_data + (params.stride_width - 1) * params.input_row_size;
  filter_ptr = filter_data + params.input_depth;
  output_ptr = output_data + params.output_row_size;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvHybridPartialPerChannel<
        output_rounding, EdgeType::kVertical, 1, 1>::Run(
            input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
            per_channel_scales, &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle right side: left two filter columns, starting at filter[0][0].
  input_ptr = input_data + (params.input_width - 2) * params.input_depth +
      (params.stride_width - 1) * params.input_row_size;
  filter_ptr = filter_data;
  output_ptr = output_data + params.output_row_size +
      (params.output_width - 1) * params.output_depth;
  for (int32 out_y = out_y_start_corner + 1; out_y < out_y_end_corner;
       out_y++) {
    DepthwiseConvHybridPartialPerChannel<
        output_rounding, EdgeType::kVertical, 1, 1>::Run(
            input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
            per_channel_scales, &params);
    input_ptr += params.stride_width * params.input_row_size;
    output_ptr += params.output_row_size;
  }
  // Handle bottom row: top two filter rows; bottom-left corner starts at
  // filter[0][1].
  input_ptr = input_data + (params.input_height - 2) * params.input_row_size;
  filter_ptr = filter_data + params.output_depth;
  output_ptr =
      output_data + (params.output_height - 1) * params.output_row_size;
  DepthwiseConvHybridPartialPerChannel<
      output_rounding, EdgeType::kCorner, 1, 1>::Run(
          input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
          per_channel_scales, &params);
  // With stride 2 the interior bottom-row outputs start one column later.
  input_ptr += (params.stride_width == 1) ? 0 : params.input_depth;
  filter_ptr = filter_data;
  output_ptr += params.output_depth;
  for (int32 out_x = out_x_start_corner + 1; out_x < out_x_end_corner;
       out_x++) {
    DepthwiseConvHybridPartialPerChannel<
        output_rounding, EdgeType::kHorizontal, 1, 1>::Run(
            input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
            per_channel_scales, &params);
    input_ptr += params.stride_width * params.input_depth;
    output_ptr += params.output_depth;
  }
  // Bottom-right corner: upper-left 2x2 sub-window (filter[0][0]).
  DepthwiseConvHybridPartialPerChannel<
      output_rounding, EdgeType::kCorner, 1, 1>::Run(
          input_scale, input_ptr, filter_ptr, bias_data, output_ptr,
          per_channel_scales, &params);
}
template <DepthwiseConvOutputRounding output_rounding>
inline void DepthwiseConvHybrid3x3FilterPerChannel(
const DepthwiseParams& rt_params, const float* input_scales,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& filter_shape, const int8* filter_data,
const RuntimeShape& bias_shape, const float* bias_data,
const RuntimeShape& output_shape, float* output_data,
const float* per_channel_scales, const int32* input_offsets,
int thread_start, int thread_end, int thread_dim) {
DepthwiseConvParams params;
const int32 stride_width = rt_params.stride_width;
const int32 stride_height = rt_params.stride_height;
const int32 pad_width = rt_params.padding_values.width;
const int32 pad_height = rt_params.padding_values.height;
const int32 depth_multiplier = rt_params.depth_multiplier;
const float output_activation_min = rt_params.float_activation_min;
const float output_activation_max = rt_params.float_activation_max;
const int32 filter_offset = rt_params.weights_offset;
params.input_depth = input_shape.Dims(3);
params.input_width = input_shape.Dims(2);
params.input_height = input_shape.Dims(1);
params.input_row_size = params.input_depth * params.input_width;
params.stride_width = stride_width; params.stride_height = stride_height;
params.output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
params.output_width = output_shape.Dims(2);
params.output_height = output_shape.Dims(1);
params.output_row_size = params.output_depth * params.output_width;
params.filter_offset = filter_offset;
params.float_output_activation_min = output_activation_min;
params.float_output_activation_max = output_activation_max;
const int32 filter_height = filter_shape.Dims(1);
const int32 filter_width = filter_shape.Dims(2);
params.filter_row_size = params.output_depth * filter_width;
// Algorithm assumes below constraints. It is optimized for depth
// multiplier of 1, 3x3 filter, no padding and strides 1 and 2.
TFLITE_DCHECK(params.output_depth == params.input_depth * depth_multiplier);
TFLITE_DCHECK(depth_multiplier == 1);
TFLITE_DCHECK(filter_height == 3);
TFLITE_DCHECK(filter_width == 3);
TFLITE_DCHECK(stride_height == 1 || stride_height == 2);
TFLITE_DCHECK(stride_width == 1 || stride_width == 2);
TFLITE_DCHECK(stride_width == stride_height);
TFLITE_DCHECK(pad_height == 0 || pad_height == 1);
TFLITE_DCHECK(pad_width == 0 || pad_width == 1);
TFLITE_DCHECK(pad_width == pad_height);
TFLITE_DCHECK(thread_dim == 0 || thread_dim == 1);
const int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
const int64_t input_batch_size = params.input_row_size * params.input_height;
const int64_t output_batch_size =
params.output_row_size * params.output_height;
ShuffleParams one_row_shuffle_params, two_row_shuffle_params,
four_row_shuffle_params, eight_row_shuffle_params;
if (stride_width == 1) {
one_row_shuffle_params = ShuffleParams(30, 1, 1, 1);
two_row_shuffle_params = ShuffleParams(22, 2, 1, 1);
four_row_shuffle_params = ShuffleParams(14, 4, 1, 1);
eight_row_shuffle_params = ShuffleParams(8, 8, 1, 1);
} else {
one_row_shuffle_params = ShuffleParams(14, 1, 2, 2);
two_row_shuffle_params = ShuffleParams(8, 2, 2, 2);
four_row_shuffle_params = ShuffleParams(4, 4, 2, 2);
eight_row_shuffle_params = ShuffleParams(2, 8, 2, 2);
}
using conv_multirow_func_t =
decltype(
&DepthwiseConvHybridMultiRowPerChannel<output_rounding, 1, 1>::Run);
conv_multirow_func_t conv_multirow_func =
DepthwiseConvHybridMultiRowPerChannel<output_rounding, 1, 1>::Run;
if (stride_width == 2) {
conv_multirow_func =
DepthwiseConvHybridMultiRowPerChannel<output_rounding, 2, 2>::Run;
}
// Allocate maximum memory needed for shuffled input.
int8 shuffle_workspace[kDepthwiseConvScratchWorkspaceSize];
int batch_start = 0;
int batch_end = batches;
int row_start = 0;
int row_end = params.output_height;
switch (thread_dim) {
case 0:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, batches);
batch_start = thread_start;
batch_end = thread_end;
break;
case 1:
TFLITE_DCHECK_GE(thread_start, 0);
TFLITE_DCHECK_LE(thread_end, params.output_height);
row_start = thread_start;
row_end = thread_end;
break;
}
for (int32 b = batch_start; b < batch_end; ++b) {
// input_ptr and output_ptr point to the start of each batch
const int8* input_ptr = input_data + b * input_batch_size;
float* output_ptr = output_data + b * output_batch_size;
params.input_offset = -input_offsets[b];
int32 out_x = 0;
int32 out_y = row_start;
int32 end_x = params.output_width;
int32 end_y = row_end;
if (pad_width == 1 && pad_height == 1) {
DepthwiseConvHybridHandlePaddingPerChannel<output_rounding>(
input_scales + b, input_ptr, filter_data,
bias_data, output_ptr, per_channel_scales, params);
// Update extents now that the edges have been handled.
out_x = 1;
end_x = params.output_width - 1;
out_y = std::max(1, out_y);
end_y = std::min(params.output_height - 1, end_y);
}
// pad_width and pad_height can both be 0 or 1, depending on padding option,
// such as Padding_VALID / Padding_SAME.
const int in_x = (out_x * stride_width) - pad_width;
const int in_y = (out_y * stride_height) - pad_height;
// input_ptr and output_ptr point to (in_y, in_x) and (out_y, out_x),
// respectively. (in_y, in_x) and (out_y, out_x) change along with
// row_start.
input_ptr += in_y * params.input_row_size + in_x * params.input_depth;
output_ptr += out_y * params.output_row_size + out_x * params.output_depth;
// Shuffling shapes that maximize width over the shuffle workspace size
// perform better since the inputs are closer together, minimizing
// shuffling time.
//
// If the input shape has width large enough for the 2 row kernels,
// we prefer to use this. The innermost loop of the kernels handle
// 2 height x 2 width so this is the fastest path.
//
// If the input shape has smaller width but larger height, shuffling is
// still useful and can benefit from kernels 4 row and 8 row kernels.
// Handle 8 rows at a time.
if (params.input_width < four_row_shuffle_params.input_width) {
for (; out_y <= end_y - 8; out_y += 8) {
conv_multirow_func(input_scales + b, input_ptr,
out_x, end_x, filter_data, bias_data, output_ptr,
per_channel_scales, params, eight_row_shuffle_params,
shuffle_workspace);
input_ptr += 8 * stride_height * params.input_row_size;
output_ptr += 8 * params.output_row_size;
}
}
// Handle 4 rows at a time.
if (params.input_width < two_row_shuffle_params.input_width) {
for (; out_y <= end_y - 4; out_y += 4) {
conv_multirow_func(input_scales + b, input_ptr,
out_x, end_x, filter_data, bias_data, output_ptr,
per_channel_scales, params, four_row_shuffle_params,
shuffle_workspace);
input_ptr += 4 * stride_height * params.input_row_size;
output_ptr += 4 * params.output_row_size;
}
}
// Handle 2 rows at a time.
for (; out_y <= end_y - 2; out_y += 2) {
conv_multirow_func(input_scales + b, input_ptr,
out_x, end_x, filter_data, bias_data, output_ptr,
per_channel_scales, params, two_row_shuffle_params,
shuffle_workspace);
input_ptr += 2 * stride_height * params.input_row_size;
output_ptr += 2 * params.output_row_size;
}
// Handle one row at a time.
for (; out_y < end_y; out_y++) {
conv_multirow_func(input_scales + b, input_ptr,
out_x, end_x, filter_data, bias_data, output_ptr,
per_channel_scales, params, one_row_shuffle_params,
shuffle_workspace);
input_ptr += stride_height * params.input_row_size;
output_ptr += params.output_row_size;
}
}
}
#endif // __aarch64__
#undef STR
#undef STR_UNEXPANDED
} // namespace depthwise_conv
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_DEPTHWISE_CONV_HYBRID_3X3_FILTER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv_hybrid_3x3_filter.h | C++ | apache-2.0 | 133,438 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
inline void FullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const int8* input_data, const RuntimeShape& filter_shape,
const int8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
CpuBackendContext* cpu_backend_context) {
ruy::profiler::ScopeLabel label("FullyConnectedInt8/8bit");
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
// TODO(b/62193649): This really should be:
// const int batches = ArraySize(output_dims, 1);
// but the current --variable_batch hack consists in overwriting the 3rd
// dimension with the runtime batch size, as we don't keep track for each
// array of which dimension is the batch dimension in it.
const int output_dim_count = output_shape.DimensionsCount();
const int filter_dim_count = filter_shape.DimensionsCount();
const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
const int filter_rows = filter_shape.Dims(filter_dim_count - 2);
const int filter_cols = filter_shape.Dims(filter_dim_count - 1);
TFLITE_DCHECK_EQ(filter_shape.FlatSize(), filter_rows * filter_cols);
const int output_rows = output_shape.Dims(output_dim_count - 1);
TFLITE_DCHECK_EQ(output_rows, filter_rows);
if (bias_data) {
TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
}
cpu_backend_gemm::MatrixParams<int8> lhs_params;
lhs_params.rows = filter_rows;
lhs_params.cols = filter_cols;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.zero_point = -filter_offset;
cpu_backend_gemm::MatrixParams<int8> rhs_params;
rhs_params.rows = filter_cols;
rhs_params.cols = batches;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.zero_point = -input_offset;
cpu_backend_gemm::MatrixParams<int8> dst_params;
dst_params.rows = filter_rows;
dst_params.cols = batches;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.zero_point = output_offset;
cpu_backend_gemm::GemmParams<int32, int8> gemm_params;
gemm_params.bias = bias_data;
gemm_params.clamp_min = output_activation_min;
gemm_params.clamp_max = output_activation_max;
gemm_params.multiplier_fixedpoint = output_multiplier;
gemm_params.multiplier_exponent = output_shift;
cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data,
dst_params, output_data, gemm_params,
cpu_backend_context);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_FULLY_CONNECTED_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h | C++ | apache-2.0 | 4,476 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MEAN_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MEAN_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
namespace tflite {
namespace optimized_integer_ops {
// Computes the quantized (int8) mean over the height and width axes for the
// output-depth slice [start_depth, end_depth). For every (batch, channel)
// pair the int32 sum of all H*W inputs is rescaled with the precomputed
// quantized `multiplier`/`shift` (which folds in the 1/(H*W) averaging
// factor and the input/output scale ratio), offset by `bias` (which folds in
// the input/output zero points), and clamped to the int8 range. The NEON
// path processes 16 channels per iteration; the scalar loop handles the
// remainder (and everything when NEON is unavailable).
inline void MeanImpl(const tflite::MeanParams& op_params,
                     const RuntimeShape& input_shape, const int8_t* input_data,
                     int32 multiplier, int32 shift, int32 bias,
                     const RuntimeShape& output_shape, int8_t* output_data,
                     int start_depth, int end_depth) {
  ruy::profiler::ScopeLabel label("Mean4D/Int8/MeanImpl");
  // Current implementation only supports 4-D tensors with simultaneous
  // reduction over width and height.
  const int output_batch = output_shape.Dims(0);
  // Bug fix: height is dimension 1 of an NHWC shape (as read in Mean()
  // below); this previously read Dims(2) (the width), so the height==1
  // precondition was never actually verified.
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_height, 1);
  TFLITE_CHECK_EQ(output_width, 1);
  constexpr static int32_t kMinValue = std::numeric_limits<int8_t>::min();
  constexpr static int32_t kMaxValue = std::numeric_limits<int8_t>::max();
#ifdef USE_NEON
  const int32x4_t bias_dup = vdupq_n_s32(bias);
  const int32x4_t min_dup = vdupq_n_s32(kMinValue);
  const int32x4_t max_dup = vdupq_n_s32(kMaxValue);
#endif  // USE_NEON
  for (int out_b = 0; out_b < output_batch; ++out_b) {
    int out_d = start_depth;
#ifdef USE_NEON
    // Vector path: accumulate 16 channels at once as four int32x4 lanes.
    for (; out_d <= end_depth - 16; out_d += 16) {
      int32x4x4_t temp_sum;
      temp_sum.val[0] = vdupq_n_s32(0);
      temp_sum.val[1] = vdupq_n_s32(0);
      temp_sum.val[2] = vdupq_n_s32(0);
      temp_sum.val[3] = vdupq_n_s32(0);
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          const int8_t* input_data_ptr =
              input_data + Offset(input_shape, out_b, in_h, in_w, out_d);
          int8x16_t input_data_val = vld1q_s8(input_data_ptr);
          // Widen int8 -> int16 -> int32 before accumulating so the running
          // sum cannot overflow.
          int16x8_t input_data_low_shift =
              vmovl_s8(vget_low_s8(input_data_val));
          int16x8_t input_data_high_shift =
              vmovl_s8(vget_high_s8(input_data_val));
          int32x4_t input_low_low =
              vmovl_s16(vget_low_s16(input_data_low_shift));
          int32x4_t input_high_low =
              vmovl_s16(vget_high_s16(input_data_low_shift));
          int32x4_t input_low_high =
              vmovl_s16(vget_low_s16(input_data_high_shift));
          int32x4_t input_high_high =
              vmovl_s16(vget_high_s16(input_data_high_shift));
          temp_sum.val[0] = vaddq_s32(temp_sum.val[0], input_low_low);
          temp_sum.val[1] = vaddq_s32(temp_sum.val[1], input_high_low);
          temp_sum.val[2] = vaddq_s32(temp_sum.val[2], input_low_high);
          temp_sum.val[3] = vaddq_s32(temp_sum.val[3], input_high_high);
        }
      }
      // Rescale the sums to the output scale, add the zero-point bias and
      // clamp to the int8 range.
      temp_sum =
          MultiplyByQuantizedMultiplier4Rows(temp_sum, multiplier, shift);
      temp_sum.val[0] = vaddq_s32(temp_sum.val[0], bias_dup);
      temp_sum.val[1] = vaddq_s32(temp_sum.val[1], bias_dup);
      temp_sum.val[2] = vaddq_s32(temp_sum.val[2], bias_dup);
      temp_sum.val[3] = vaddq_s32(temp_sum.val[3], bias_dup);
      temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup);
      temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup);
      temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup);
      temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup);
      // Narrow int32 -> int16 -> int8 and store the 16 results.
      int16x4_t narrowed_low_low = vmovn_s32(temp_sum.val[0]);
      int16x4_t narrowed_high_low = vmovn_s32(temp_sum.val[1]);
      int16x4_t narrowed_low_high = vmovn_s32(temp_sum.val[2]);
      int16x4_t narrowed_high_high = vmovn_s32(temp_sum.val[3]);
      int16x8_t combined_low =
          vcombine_s16(narrowed_low_low, narrowed_high_low);
      int16x8_t combined_high =
          vcombine_s16(narrowed_low_high, narrowed_high_high);
      int8x8_t narrowed_low = vmovn_s16(combined_low);
      int8x8_t narrowed_high = vmovn_s16(combined_high);
      int8x16_t combined_output = vcombine_s8(narrowed_low, narrowed_high);
      int8_t* output_data_ptr =
          output_data + Offset(output_shape, out_b, 0, 0, out_d);
      vst1q_s8(output_data_ptr, combined_output);
    }
#endif  // USE_NEON
    // Scalar path for the remaining channels.
    for (; out_d < end_depth; ++out_d) {
      int acc = 0;
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
        }
      }
      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      acc += bias;
      acc = std::min(std::max(acc, kMinValue), kMaxValue);
      output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
          static_cast<int8_t>(acc);
    }
  }
}
struct MeanWorkerTask : cpu_backend_threadpool::Task {
MeanWorkerTask(const tflite::MeanParams& op_params,
const RuntimeShape& input_shape, const int8_t* input_data,
int32 multiplier, int32 shift, int32 bias,
const RuntimeShape& output_shape, int8_t* output_data,
int start_height, int end_height)
: op_params(op_params),
input_shape(input_shape),
input_data(input_data),
multiplier(multiplier),
shift(shift),
bias(bias),
output_shape(output_shape),
output_data(output_data),
start_height(start_height),
end_height(end_height) {}
void Run() override {
MeanImpl(op_params, input_shape, input_data, multiplier, shift, bias,
output_shape, output_data, start_height, end_height);
}
private:
const tflite::MeanParams& op_params;
const RuntimeShape& input_shape;
const int8_t* input_data;
int32 multiplier;
int32 shift;
int32 bias;
const RuntimeShape& output_shape;
int8_t* output_data;
int start_height;
int end_height;
};
// Quantized int8 Mean over the height and width axes of a 4-D (NHWC) tensor.
// Precomputes a fixed-point multiplier/shift for the combined rescale factor
// input_scale / (H * W * output_scale) and an additive bias folding in the
// zero points, then delegates the per-channel reduction to MeanImpl —
// either directly or split across worker threads along the depth axis.
inline void Mean(const tflite::MeanParams& op_params,
                 const RuntimeShape& unextended_input_shape,
                 const int8_t* input_data, int32 input_zero_point,
                 float input_scale, const RuntimeShape& unextended_output_shape,
                 int8_t* output_data, int32 output_zero_point,
                 float output_scale, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("Mean4D/Int8");
  // Only 4-D tensors with simultaneous reduction over width and height are
  // supported; both reduced output dimensions must collapse to 1.
  TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_shape.Dims(1), 1);
  TFLITE_CHECK_EQ(output_shape.Dims(2), 1);
  const int output_depth = output_shape.Dims(3);
  const float num_elements_in_axis =
      input_shape.Dims(2) * input_shape.Dims(1);
  // Fold the zero points into an additive bias, rounding the rescaled input
  // zero point away from zero before truncation.
  float rescaled_zp = input_zero_point * input_scale / output_scale;
  rescaled_zp = rescaled_zp > 0 ? rescaled_zp + 0.5f : rescaled_zp - 0.5f;
  const int32_t bias = output_zero_point - static_cast<int32_t>(rescaled_zp);
  const float real_scale =
      input_scale / (num_elements_in_axis * output_scale);
  int32 multiplier, shift;
  QuantizeMultiplier(real_scale, &multiplier, &shift);
  // Give every worker at least kMinDepthPerThread channels, and never use
  // more threads than the backend allows.
  constexpr int kMinDepthPerThread = 8;
  const int capped_thread_count =
      std::min(std::max(1, output_depth / kMinDepthPerThread),
               cpu_backend_context->max_num_threads());
  if (capped_thread_count == 1) {
    MeanImpl(op_params, input_shape, input_data, multiplier, shift, bias,
             output_shape, output_data, 0, output_depth);
    return;
  }
  // Parallelize along output_depth instead of batch, since batch is
  // typically 1.
  std::vector<MeanWorkerTask> tasks;
  // TODO(b/131746020) don't create new heap allocations every time.
  // At least we make it a single heap allocation by using reserve().
  tasks.reserve(capped_thread_count);
  int depth_start = 0;
  for (int i = 0; i < capped_thread_count; ++i) {
    // Distribute the remaining channels as evenly as possible over the
    // remaining workers.
    const int depth_end =
        depth_start + (output_depth - depth_start) / (capped_thread_count - i);
    tasks.emplace_back(op_params, input_shape, input_data, multiplier, shift,
                       bias, output_shape, output_data, depth_start,
                       depth_end);
    depth_start = depth_end;
  }
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MEAN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/mean.h | C++ | apache-2.0 | 10,382 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MUL_H_
#include <algorithm>
#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/reference/integer_ops/mul.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
// Element-wise mul that can often be used for inner loop of broadcast Mul as
// well as the non-broadcast Mul.
//
// Per element this computes, in int32 arithmetic:
//   Clamp(output_offset +
//         Rescale((in1 + input1_offset) * (in2 + input2_offset)))
// where Rescale applies params.output_multiplier / params.output_shift and
// Clamp uses the quantized activation bounds.
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const int8* input1_data, const int8* input2_data,
                           int8* output_data) {
  ruy::profiler::ScopeLabel label("MulElementwiseInt8/8bit");
  int i = 0;
  // The offsets must be in (-256, 256) so that (int8 value + offset) is
  // representable in the int16 lanes used by the NEON path below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_GT(params.output_offset, -256);
  TFLITE_DCHECK_LT(params.output_offset, 256);
#ifdef USE_NEON
  const int16x8_t input1_offset_vector = vdupq_n_s16(params.input1_offset);
  const int16x8_t input2_offset_vector = vdupq_n_s16(params.input2_offset);
  const int16x8_t output_offset_vector = vdupq_n_s16(params.output_offset);
  const auto output_activation_min_vector =
      vdupq_n_s8(params.quantized_activation_min);
  const auto output_activation_max_vector =
      vdupq_n_s8(params.quantized_activation_max);
  // Split the signed output_shift into a left shift (applied before the
  // fixed-point multiply) and a rounding right shift (applied after).
  const int left_shift = std::max(0, params.output_shift);
  const int right_shift = std::max(0, -params.output_shift);
  const int32x4_t left_shift_vec = vdupq_n_s32(left_shift);
  for (; i <= size - 16; i += 16) {
    // We load / store 16 at a time, multiplying as four sets of 4 int32s.
    const int8x16_t input1_val_original = vld1q_s8(input1_data + i);
    const int8x16_t input2_val_original = vld1q_s8(input2_data + i);
    // Widen int8 -> int16 and add the input offsets.
    const int16x8_t input1_val_s16_high =
        vmovl_s8(vget_high_s8(input1_val_original));
    const int16x8_t input1_val_s16_low =
        vmovl_s8(vget_low_s8(input1_val_original));
    const int16x8_t input2_val_s16_high =
        vmovl_s8(vget_high_s8(input2_val_original));
    const int16x8_t input2_val_s16_low =
        vmovl_s8(vget_low_s8(input2_val_original));
    const int16x8_t input1_val_high =
        vaddq_s16(input1_val_s16_high, input1_offset_vector);
    const int16x8_t input2_val_high =
        vaddq_s16(input2_val_s16_high, input2_offset_vector);
    const int16x8_t input1_val_low =
        vaddq_s16(input1_val_s16_low, input1_offset_vector);
    const int16x8_t input2_val_low =
        vaddq_s16(input2_val_s16_low, input2_offset_vector);
    const int16x4_t input1_val_high_high = vget_high_s16(input1_val_high);
    const int16x4_t input1_val_high_low = vget_low_s16(input1_val_high);
    const int16x4_t input1_val_low_high = vget_high_s16(input1_val_low);
    const int16x4_t input1_val_low_low = vget_low_s16(input1_val_low);
    const int16x4_t input2_val_high_high = vget_high_s16(input2_val_high);
    const int16x4_t input2_val_high_low = vget_low_s16(input2_val_high);
    const int16x4_t input2_val_low_high = vget_high_s16(input2_val_low);
    const int16x4_t input2_val_low_low = vget_low_s16(input2_val_low);
    // Widening int16 multiplies produce the raw int32 products.
    auto p1 = vmull_s16(input2_val_high_high, input1_val_high_high);
    auto p2 = vmull_s16(input2_val_high_low, input1_val_high_low);
    auto p3 = vmull_s16(input2_val_low_high, input1_val_low_high);
    auto p4 = vmull_s16(input2_val_low_low, input1_val_low_low);
    p1 = vshlq_s32(p1, left_shift_vec);
    p2 = vshlq_s32(p2, left_shift_vec);
    p3 = vshlq_s32(p3, left_shift_vec);
    p4 = vshlq_s32(p4, left_shift_vec);
    // Saturating rounding doubling high multiply by the fixed-point
    // multiplier, then a rounding right shift: together these implement
    // MultiplyByQuantizedMultiplier for the vector lanes.
    p1 = vqrdmulhq_n_s32(p1, params.output_multiplier);
    p2 = vqrdmulhq_n_s32(p2, params.output_multiplier);
    p3 = vqrdmulhq_n_s32(p3, params.output_multiplier);
    p4 = vqrdmulhq_n_s32(p4, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    p1 = RoundingDivideByPOT(p1, right_shift);
    p2 = RoundingDivideByPOT(p2, right_shift);
    p3 = RoundingDivideByPOT(p3, right_shift);
    p4 = RoundingDivideByPOT(p4, right_shift);
    // Narrow with saturation, add the output offset, and narrow again to
    // int8 before clamping to the activation range.
    const auto p1_narrowed = vqmovn_s32(p1);
    const auto p2_narrowed = vqmovn_s32(p2);
    const auto p3_narrowed = vqmovn_s32(p3);
    const auto p4_narrowed = vqmovn_s32(p4);
    const int16x8_t p_part1 =
        vaddq_s16(vcombine_s16(p2_narrowed, p1_narrowed), output_offset_vector);
    const int16x8_t p_part2 =
        vaddq_s16(vcombine_s16(p4_narrowed, p3_narrowed), output_offset_vector);
    const int8x16_t p = vcombine_s8(vqmovn_s16(p_part2), vqmovn_s16(p_part1));
    const auto clamped = vmaxq_s8(output_activation_min_vector,
                                  vminq_s8(output_activation_max_vector, p));
    vst1q_s8(output_data + i, clamped);
  }
#endif // NEON
  // Scalar tail: handles the remainder (and the whole range without NEON).
  for (; i < size; ++i) {
    const int32 input1_val = params.input1_offset + input1_data[i];
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<int8>(clamped_output);
  }
}
// Broadcast mul that can often be used for inner loop of broadcast Mul.
//
// Multiplies every element of input2 by the single scalar `broadcast_value`
// from input1. The scalar's offset is folded in once up front
// (input1_val = input1_offset + broadcast_value); otherwise the arithmetic
// matches MulElementwise above.
inline void MulSimpleBroadcast(int size, const ArithmeticParams& params,
                               const int8 broadcast_value,
                               const int8* input2_data, int8* output_data) {
  ruy::profiler::ScopeLabel label("BroadMulSimpleBroadcastInt8/8bit");
  const int16 input1_val = params.input1_offset + broadcast_value;
  int i = 0;
  // The offsets must be in (-256, 256) so that (int8 value + offset) is
  // representable in the int16 lanes used by the NEON path below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_GT(params.output_offset, -256);
  TFLITE_DCHECK_LT(params.output_offset, 256);
#ifdef USE_NEON
  const auto input2_offset_vector = vdupq_n_s16(params.input2_offset);
  const auto output_offset_vector = vdupq_n_s16(params.output_offset);
  const auto output_activation_min_vector =
      vdupq_n_s8(params.quantized_activation_min);
  const auto output_activation_max_vector =
      vdupq_n_s8(params.quantized_activation_max);
  // Split the signed output_shift into a left shift (applied before the
  // fixed-point multiply) and a rounding right shift (applied after).
  const int left_shift = std::max(0, params.output_shift);
  const int right_shift = std::max(0, -params.output_shift);
  const int32x4_t left_shift_vec = vdupq_n_s32(left_shift);
  for (; i <= size - 16; i += 16) {
    // We load / store 16 at a time, multiplying as four sets of 4 int32s.
    const auto input2_val_original = vld1q_s8(input2_data + i);
    // Widen int8 -> int16 and add the input2 offset.
    const auto input2_val_s16_high =
        vmovl_s8(vget_high_s8(input2_val_original));
    const auto input2_val_s16_low = vmovl_s8(vget_low_s8(input2_val_original));
    const auto input2_val_high =
        vaddq_s16(input2_val_s16_high, input2_offset_vector);
    const auto input2_val_low =
        vaddq_s16(input2_val_s16_low, input2_offset_vector);
    const auto input2_val_low_low = vget_low_s16(input2_val_low);
    const auto input2_val_low_high = vget_high_s16(input2_val_low);
    const auto input2_val_high_low = vget_low_s16(input2_val_high);
    const auto input2_val_high_high = vget_high_s16(input2_val_high);
    // Widening multiply of each int16x4 lane group by the scalar.
    auto p1 = vmull_n_s16(input2_val_high_high, input1_val);
    auto p2 = vmull_n_s16(input2_val_high_low, input1_val);
    auto p3 = vmull_n_s16(input2_val_low_high, input1_val);
    auto p4 = vmull_n_s16(input2_val_low_low, input1_val);
    p1 = vshlq_s32(p1, left_shift_vec);
    p2 = vshlq_s32(p2, left_shift_vec);
    p3 = vshlq_s32(p3, left_shift_vec);
    p4 = vshlq_s32(p4, left_shift_vec);
    // Saturating rounding doubling high multiply + rounding right shift:
    // the vector form of MultiplyByQuantizedMultiplier.
    p1 = vqrdmulhq_n_s32(p1, params.output_multiplier);
    p2 = vqrdmulhq_n_s32(p2, params.output_multiplier);
    p3 = vqrdmulhq_n_s32(p3, params.output_multiplier);
    p4 = vqrdmulhq_n_s32(p4, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    p1 = RoundingDivideByPOT(p1, right_shift);
    p2 = RoundingDivideByPOT(p2, right_shift);
    p3 = RoundingDivideByPOT(p3, right_shift);
    p4 = RoundingDivideByPOT(p4, right_shift);
    // Narrow with saturation, add the output offset, narrow to int8, clamp.
    const auto p1_narrowed = vqmovn_s32(p1);
    const auto p2_narrowed = vqmovn_s32(p2);
    const auto p3_narrowed = vqmovn_s32(p3);
    const auto p4_narrowed = vqmovn_s32(p4);
    const int16x8_t p_part1 =
        vaddq_s16(vcombine_s16(p2_narrowed, p1_narrowed), output_offset_vector);
    const int16x8_t p_part2 =
        vaddq_s16(vcombine_s16(p4_narrowed, p3_narrowed), output_offset_vector);
    const int8x16_t p = vcombine_s8(vqmovn_s16(p_part2), vqmovn_s16(p_part1));
    const auto clamped = vmaxq_s8(output_activation_min_vector,
                                  vminq_s8(output_activation_max_vector, p));
    vst1q_s8(output_data + i, clamped);
  }
#endif // NEON
  // Scalar tail: handles the remainder (and the whole range without NEON).
  for (; i < size; ++i) {
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<int8>(clamped_output);
  }
}
// Non-broadcast quantized int8 Mul: all three shapes must have the same
// number of elements; delegates to the element-wise kernel.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int8* input1_data,
                const RuntimeShape& input2_shape, const int8* input2_data,
                const RuntimeShape& output_shape, int8* output_data) {
  ruy::profiler::ScopeLabel label("MulInt8/8bit");
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int num_elements =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  MulElementwise(num_elements, params, input1_data, input2_data, output_data);
}
// Dispatches a broadcasting Mul. Generic broadcast shapes take the slow 4-D
// reference kernel; every other broadcast category goes through the
// optimized five-fold loop structure with MulElementwise /
// MulSimpleBroadcast as the inner kernels.
inline void BroadcastMulDispatch(const ArithmeticParams& params,
                                 const RuntimeShape& input1_shape,
                                 const int8* input1_data,
                                 const RuntimeShape& input2_shape,
                                 const int8* input2_data,
                                 const RuntimeShape& output_shape,
                                 int8* output_data) {
  const bool needs_generic_path =
      params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast;
  if (needs_generic_path) {
    reference_integer_ops::BroadcastMul4DSlow(params, input1_shape,
                                              input1_data, input2_shape,
                                              input2_data, output_shape,
                                              output_data);
    return;
  }
  optimized_ops::BinaryBroadcastFiveFold(
      params, input1_shape, input1_data, input2_shape, input2_data,
      output_shape, output_data, MulElementwise, MulSimpleBroadcast);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_MUL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/mul.h | C++ | apache-2.0 | 12,284 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_POOLING_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_POOLING_H_
#include <string.h>
#include <algorithm>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/im2col_utils.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_integer_ops {
// Quantized int8 max pooling over 4-D NHWC tensors, NEON-accelerated where
// USE_NEON is defined. Results are clamped to
// [params.quantized_activation_min, params.quantized_activation_max].
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int8* input_data, const RuntimeShape& output_shape,
                    int8* output_data) {
  ruy::profiler::ScopeLabel label("MaxPool/8bit");

  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;

  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  // Per-tranche running maxima; int8 matches the output element type.
  int8 acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch) {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth;
         depth_base += kPoolingAccTrancheSize) {
      const int tranche_depth =
          std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y) {
        for (int out_x = 0; out_x < output_width; ++out_x) {
          // Top-left corner of the pooling window in input coordinates;
          // may be negative where the window overhangs the padding.
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Clip the filter window to the valid input region.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          // Seed the maxima with the activation floor. memset fills
          // byte-wise, which is exact here since each element is one byte.
          memset(acc, params.quantized_activation_min,
                 tranche_depth * sizeof(acc[0]));
          const int8* input_ptr =
              input_data + depth_base +
              depth * (in_x_origin +
                       input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++) {
            const int8* input_row_ptr =
                input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++) {
              const int8* input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              // Vectorized element-wise max, 16 then 8 channels at a time.
              for (; channel <= tranche_depth - 16; channel += 16) {
                int8x16_t acc_reg = vld1q_s8(acc + channel);
                int8x16_t input_reg = vld1q_s8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg = vmaxq_s8(acc_reg, input_reg);
                vst1q_s8(acc + channel, acc_reg);
              }

              for (; channel <= tranche_depth - 8; channel += 8) {
                int8x8_t acc_reg = vld1_s8(acc + channel);
                int8x8_t input_reg = vld1_s8(input_channel_ptr);
                input_channel_ptr += 8;
                acc_reg = vmax_s8(acc_reg, input_reg);
                vst1_s8(acc + channel, acc_reg);
              }

#endif
              // Scalar tail for channels not covered by the SIMD loops.
              for (; channel < tranche_depth; ++channel) {
                acc[channel] = std::max(acc[channel], *input_channel_ptr++);
              }
              input_row_ptr += depth;
            }
          }
          int8* output_ptr = output_data + Offset(output_shape, batch, out_y,
                                                  out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
          // Clamp to the activation range and store, 16 then 8 lanes at a
          // time.
          for (; channel <= tranche_depth - 16; channel += 16) {
            int8x16_t a = vld1q_s8(acc + channel);
            a = vminq_s8(a, vdupq_n_s8(params.quantized_activation_max));
            a = vmaxq_s8(a, vdupq_n_s8(params.quantized_activation_min));
            vst1q_s8(output_ptr + channel, a);
          }
          for (; channel <= tranche_depth - 8; channel += 8) {
            int8x8_t a = vld1_s8(acc + channel);
            a = vmin_s8(a, vdup_n_s8(params.quantized_activation_max));
            a = vmax_s8(a, vdup_n_s8(params.quantized_activation_min));
            vst1_s8(output_ptr + channel, a);
          }
#endif
          // Scalar clamp-and-store tail.
          for (; channel < tranche_depth; ++channel) {
            int8 a = acc[channel];
            a = std::max<int8>(a, params.quantized_activation_min);
            a = std::min<int8>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<int8>(a);
          }
        }
      }
    }
  }
}
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape, const int8* input_data,
const RuntimeShape& output_shape, int8* output_data) {
ruy::profiler::ScopeLabel label("AveragePool/8bitWith32bitAccumulator");
// Here, and in other pooling ops, in order to maintain locality of reference,
// to minimize some recalculations, and to load into NEON vector registers, we
// use an inner loop down the depth. Since depths can be large and hence we
// would need arbitrarily large temporary storage, we divide the work up into
// depth tranches just within the batch loop.
static constexpr int kPoolingAccTrancheSize = 256;
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
int32 acc[kPoolingAccTrancheSize];
for (int batch = 0; batch < batches; ++batch) {
// We proceed through the depth in tranches (see comment above). The
// depth_base is the depth at the beginning of the tranche. The
// tranche_depth is the depth dimension of the tranche.
for (int depth_base = 0; depth_base < depth;
depth_base += kPoolingAccTrancheSize) {
const int tranche_depth =
std::min(depth - depth_base, kPoolingAccTrancheSize);
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
const int filter_count =
(filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
memset(acc, 0, tranche_depth * sizeof(acc[0]));
const int8* input_ptr =
input_data + depth_base +
depth * (in_x_origin +
input_width * (in_y_origin + input_height * batch));
for (int fy = filter_y_start; fy < filter_y_end; fy++) {
const int8* input_row_ptr =
input_ptr + depth * (fy * input_width + filter_x_start);
for (int fx = filter_x_start; fx < filter_x_end; fx++) {
const int8* input_channel_ptr = input_row_ptr;
int channel = 0;
#ifdef USE_NEON
for (; channel <= tranche_depth - 16; channel += 16) {
int16x4_t acc_reg[4];
int8x16_t input_reg = vld1q_s8(input_channel_ptr);
input_channel_ptr += 16;
acc_reg[0] = vget_low_s16(vmovl_s8(vget_low_s8(input_reg)));
acc_reg[1] = vget_high_s16(vmovl_s8(vget_low_s8(input_reg)));
acc_reg[2] = vget_low_s16(vmovl_s8(vget_high_s8(input_reg)));
acc_reg[3] = vget_high_s16(vmovl_s8(vget_high_s8(input_reg)));
for (int i = 0; i < 4; i++) {
vst1q_s32(
acc + channel + 4 * i,
vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
}
}
for (; channel <= tranche_depth - 8; channel += 8) {
int16x4_t acc_reg[2];
int16x8_t input_reg = vmovl_s8(vld1_s8(input_channel_ptr));
input_channel_ptr += 8;
acc_reg[0] = vget_low_s16(input_reg);
acc_reg[1] = vget_high_s16(input_reg);
for (int i = 0; i < 2; i++) {
vst1q_s32(
acc + channel + 4 * i,
vaddw_s16(vld1q_s32(acc + channel + 4 * i), acc_reg[i]));
}
}
#endif
for (; channel < tranche_depth; ++channel) {
acc[channel] += *input_channel_ptr++;
}
input_row_ptr += depth;
}
}
int8* output_ptr = output_data + Offset(output_shape, batch, out_y,
out_x, depth_base);
int channel = 0;
#ifdef USE_NEON
for (; channel <= tranche_depth - 8; channel += 8) {
int16 buf[8];
for (int i = 0; i < 8; i++) {
buf[i] =
acc[channel + i] > 0
? (acc[channel + i] + filter_count / 2) / filter_count
: (acc[channel + i] - filter_count / 2) / filter_count;
}
int8x8_t buf8 = vqmovn_s16(vld1q_s16(buf));
buf8 = vmin_s8(buf8, vdup_n_s8(params.quantized_activation_max));
buf8 = vmax_s8(buf8, vdup_n_s8(params.quantized_activation_min));
vst1_s8(output_ptr + channel, buf8);
}
#endif
for (; channel < tranche_depth; ++channel) {
int16 a = acc[channel] > 0
? (acc[channel] + filter_count / 2) / filter_count
: (acc[channel] - filter_count / 2) / filter_count;
a = std::max<int16>(a, params.quantized_activation_min);
a = std::min<int16>(a, params.quantized_activation_max);
output_ptr[channel] = static_cast<int8>(a);
}
}
}
}
}
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_POOLING_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/pooling.h | C++ | apache-2.0 | 13,011 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_TRANSPOSE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_TRANSPOSE_CONV_H_
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
namespace tflite {
namespace optimized_integer_ops {
// TransposeConvV2 expects the weights in HWOI order.
//
// Quantized (int8) transpose convolution. Per batch it performs:
//   1) GEMM: (flattened HWO filter) x (input, viewed depth x image) into
//      the col2im buffer;
//   2) Col2im: scatter-accumulate that buffer into the int32 scratch
//      output slice.
// After all batches it adds the bias and requantizes the whole scratch
// buffer into int8 output_data using per-channel multiplier/shift.
inline void TransposeConvV2(
    const ConvParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& hwoi_ordered_filter_shape,
    const int8_t* hwoi_ordered_filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data, const RuntimeShape& col2im_shape, int32_t* col2im_data,
    int32_t* scratch_data, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("TransposeConvV2/int8");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(hwoi_ordered_filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK(col2im_data);
  TFLITE_DCHECK(hwoi_ordered_filter_data);
  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_image_size = input_shape.Dims(1) * input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_image_size = output_height * output_width;
  const int input_depth =
      MatchingDim(input_shape, 3, hwoi_ordered_filter_shape, 3);
  const int output_depth =
      MatchingDim(output_shape, 3, hwoi_ordered_filter_shape, 2);
  // Per-batch element strides into the input and scratch/output buffers.
  const int input_offset = input_image_size * input_depth;
  const int output_offset = output_image_size * output_depth;
  const int filter_height = hwoi_ordered_filter_shape.Dims(0);
  const int filter_width = hwoi_ordered_filter_shape.Dims(1);
  const int padding_top = params.padding_values.height;
  const int padding_bottom =
      params.padding_values.height + params.padding_values.height_offset;
  const int padding_left = params.padding_values.width;
  const int padding_right =
      params.padding_values.width + params.padding_values.width_offset;
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  const int hwoi_ordered_filter_total_size =
      filter_height * filter_width * output_depth;
  cpu_backend_gemm::MatrixParams<int8_t> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = hwoi_ordered_filter_total_size;
  lhs_params.cols = input_depth;
  // Since our weight is symmetric quantized, the zp will always be 0.
  lhs_params.zero_point = 0;
  int32_t* scratch_data_p = scratch_data;
  // Col2im accumulates into the scratch buffer, so it must start zeroed.
  std::fill_n(scratch_data, output_offset * batch_size, static_cast<int32>(0));
  for (int i = 0; i < batch_size; ++i) {
    cpu_backend_gemm::MatrixParams<int8_t> rhs_params;
    rhs_params.order = cpu_backend_gemm::Order::kColMajor;
    rhs_params.rows = input_depth;
    rhs_params.cols = input_image_size;
    // NOTE(review): input_offset is negated to obtain the rhs zero point —
    // confirm against the ConvParams sign convention.
    rhs_params.zero_point = -params.input_offset;
    cpu_backend_gemm::MatrixParams<int32_t> dst_params;
    dst_params.order = cpu_backend_gemm::Order::kColMajor;
    dst_params.rows = hwoi_ordered_filter_total_size;
    dst_params.cols = input_image_size;
    cpu_backend_gemm::GemmParams<int32_t, int32_t> gemm_params;
    cpu_backend_gemm::Gemm(lhs_params, hwoi_ordered_filter_data, rhs_params,
                           input_data + input_offset * i, dst_params,
                           col2im_data, gemm_params, cpu_backend_context);
    // Scatter-accumulate the GEMM result into this batch's scratch slice.
    optimized_ops::Col2im(
        col2im_data, output_depth, output_height, output_width, filter_height,
        filter_width, padding_top, padding_left, padding_bottom, padding_right,
        stride_height, stride_width, scratch_data_p);
    scratch_data_p += output_offset;
  }
  scratch_data_p = scratch_data;
  optimized_ops::BiasAdd(scratch_data_p, bias_data, batch_size, output_height,
                         output_width, output_depth);
  // Requantize the int32 scratch buffer to int8 with per-channel
  // multiplier/shift, saturating to the full int8 range.
  const int32_t output_min = std::numeric_limits<int8_t>::min();
  const int32_t output_max = std::numeric_limits<int8_t>::max();
  optimized_ops::Quantize(output_multiplier, output_shift, output_depth,
                          output_shape.FlatSize(), params.output_offset,
                          output_min, output_max, scratch_data, output_data);
}
} // namespace optimized_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_INTEGER_OPS_TRANSPOSE_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/integer_ops/transpose_conv.h | C++ | apache-2.0 | 5,182 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_LEGACY_OPTIMIZED_OPS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_LEGACY_OPTIMIZED_OPS_H_
#include <stdint.h>
#include <sys/types.h>
#include "public/gemmlowp.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/depthwiseconv_multithread.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/depthwise_conv.h"
#include "tensorflow/lite/kernels/internal/optimized/integer_ops/fully_connected.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/optimized/resize_bilinear.h"
#include "tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
// Unoptimized reference ops:
using reference_ops::Broadcast4DSlowGreater;
using reference_ops::Broadcast4DSlowGreaterEqual;
using reference_ops::Broadcast4DSlowGreaterEqualWithScaling;
using reference_ops::Broadcast4DSlowGreaterWithScaling;
using reference_ops::Broadcast4DSlowLess;
using reference_ops::Broadcast4DSlowLessEqual;
using reference_ops::Broadcast4DSlowLessEqualWithScaling;
using reference_ops::Broadcast4DSlowLessWithScaling;
using reference_ops::BroadcastAdd4DSlow;
using reference_ops::BroadcastGreater;
using reference_ops::BroadcastGreaterEqual;
using reference_ops::BroadcastLess;
using reference_ops::BroadcastLessEqual;
using reference_ops::BroadcastMul4DSlow;
using reference_ops::BroadcastSubSlow;
using reference_ops::Concatenation;
using reference_ops::ConcatenationWithScaling;
using reference_ops::DepthConcatenation;
using reference_ops::Div;
using reference_ops::FakeQuant;
using reference_ops::Gather;
using reference_ops::Greater;
using reference_ops::GreaterEqual;
using reference_ops::GreaterEqualWithScaling;
using reference_ops::GreaterWithScaling;
using reference_ops::Less;
using reference_ops::LessEqual;
using reference_ops::LessEqualWithScaling;
using reference_ops::LessWithScaling;
using reference_ops::Mean;
using reference_ops::RankOneSelect;
using reference_ops::Relu1;
using reference_ops::Relu6;
using reference_ops::ReluX;
using reference_ops::Select;
using reference_ops::SpaceToBatchND;
using reference_ops::Split;
using reference_ops::TensorFlowSplit;
// Multiplier used to flip the sign of legacy `output_shift` arguments:
// legacy callers pass shifts with the opposite sign convention from the
// modern params structs, where positive means a left shift (see the
// "Legacy ops used mixed left and right shifts" comment in the uint8
// DepthwiseConv below).
static constexpr int kDepthwiseReverseShift = -1;
// Views the flat buffer described by `dims` as a one-dimensional vector of
// FlatSize(dims) elements.
template <typename Scalar, int N>
VectorMap<Scalar> MapAsVector(Scalar* data, const Dims<N>& dims) {
  return VectorMap<Scalar>(data, FlatSize(dims), 1);
}
// Views the buffer as a 2-D matrix whose rows come from dimension 0; all
// remaining dimensions are folded into the column count.
template <typename Scalar, int N>
MatrixMap<Scalar> MapAsMatrixWithFirstDimAsRows(Scalar* data,
                                                const Dims<N>& dims) {
  int col_count = 1;
  for (int axis = 1; axis < N; ++axis) {
    col_count *= dims.sizes[axis];
  }
  return MatrixMap<Scalar>(data, dims.sizes[0], col_count);
}
// Views the buffer as a 2-D matrix whose columns come from the last
// dimension; all leading dimensions are folded into the row count.
template <typename Scalar, int N>
MatrixMap<Scalar> MapAsMatrixWithLastDimAsCols(Scalar* data,
                                               const Dims<N>& dims) {
  int row_count = 1;
  for (int axis = 0; axis + 1 < N; ++axis) {
    row_count *= dims.sizes[axis];
  }
  return MatrixMap<Scalar>(data, row_count, dims.sizes[N - 1]);
}
// Same shape mapping as MapAsMatrixWithFirstDimAsRows, but yields an
// ArrayMap view instead of a MatrixMap.
template <typename Scalar, int N>
ArrayMap<Scalar> MapAsArrayWithFirstDimAsRows(Scalar* data,
                                              const Dims<N>& dims) {
  int col_count = 1;
  for (int axis = 1; axis < N; ++axis) {
    col_count *= dims.sizes[axis];
  }
  return ArrayMap<Scalar>(data, dims.sizes[0], col_count);
}
// TODO(b/62193649): this function is only needed as long
// as we have the --variable_batch hack.
//
// Views the buffer as a 2-D matrix with an explicit row count; the flat
// size must divide evenly by `rows`.
template <typename Scalar, int N>
MatrixMap<Scalar> MapAsMatrixWithGivenNumberOfRows(Scalar* data,
                                                   const Dims<N>& dims,
                                                   int rows) {
  const int total_size = FlatSize(dims);
  TFLITE_DCHECK((total_size % rows) == 0);
  return MatrixMap<Scalar>(data, rows, total_size / rows);
}
// Returns true iff all four extents of the two Dims<4> match.
inline bool AreSameDims(const Dims<4>& dims1, const Dims<4>& dims2) {
  int axis = 0;
  while (axis < 4) {
    if (dims1.sizes[axis] != dims2.sizes[axis]) {
      return false;
    }
    ++axis;
  }
  return true;
}
inline void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
const float* filter_data, const Dims<4>& filter_dims,
const float* bias_data, const Dims<4>& bias_dims,
int stride_width, int stride_height,
int dilation_width_factor, int dilation_height_factor,
int pad_width, int pad_height, int depth_multiplier,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
tflite::DepthwiseParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.dilation_width_factor = dilation_width_factor;
op_params.dilation_height_factor = dilation_height_factor;
op_params.depth_multiplier = depth_multiplier;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
const RuntimeShape output_shape = DimsToShape(output_dims);
const int output_height = output_shape.Dims(1);
DepthwiseConvImpl(op_params, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), filter_data,
DimsToShape(bias_dims), bias_data, output_shape,
output_data, CpuFlags(), /*thread_start=*/0,
/*thread_end=*/output_height, /*thread_dim=*/1);
}
// Undilated float variant: forwards with both dilation factors fixed at 1.
inline void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                          const float* filter_data, const Dims<4>& filter_dims,
                          const float* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, int depth_multiplier,
                          float output_activation_min,
                          float output_activation_max, float* output_data,
                          const Dims<4>& output_dims) {
  const int kNoDilation = 1;
  DepthwiseConv(input_data, input_dims, filter_data, filter_dims, bias_data,
                bias_dims, stride_width, stride_height, kNoDilation,
                kNoDilation, pad_width, pad_height, depth_multiplier,
                output_activation_min, output_activation_max, output_data,
                output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                   const float* filter_data, const Dims<4>& filter_dims,
                   const float* bias_data, const Dims<4>& bias_dims,
                   int stride_width, int stride_height, int pad_width,
                   int pad_height, int depth_multiplier, float* output_data,
                   const Dims<4>& output_dims) {
  // Resolve the compile-time activation into runtime min/max bounds, then
  // forward to the overload that takes them explicitly.
  float activation_min;
  float activation_max;
  GetActivationMinMax(Ac, &activation_min, &activation_max);
  DepthwiseConv(input_data, input_dims, filter_data, filter_dims, bias_data,
                bias_dims, stride_width, stride_height, pad_width, pad_height,
                depth_multiplier, activation_min, activation_max, output_data,
                output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
const float* filter_data, const Dims<4>& filter_dims,
const float* bias_data, const Dims<4>& bias_dims, int stride,
int pad_width, int pad_height, int depth_multiplier,
float* output_data, const Dims<4>& output_dims) {
DepthwiseConv<Ac>(input_data, input_dims, filter_data, filter_dims, bias_data,
bias_dims, stride, stride, pad_width, pad_height,
depth_multiplier, output_data, output_dims);
}
// Uint8 depthwise convolution dispatcher, parameterized on the output
// rounding mode. Validates shape/activation invariants, then routes to a
// specialized 3x3 kernel on arm64 when the parameters allow, else to the
// general kernel. Processes the output sub-range
// [thread_start, thread_end) along dimension thread_dim.
template <DepthwiseConvOutputRounding kOutputRounding>
inline void LegacyDepthwiseConvWithRounding(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, int thread_start, int thread_end, int thread_dim) {
  ruy::profiler::ScopeLabel label("DepthwiseConv/8bit");
  const int depth_multiplier = params.depth_multiplier;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  TFLITE_DCHECK_GE(dilation_width_factor, 1);
  TFLITE_DCHECK_GE(dilation_height_factor, 1);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_depth = input_shape.Dims(3);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
// Enable for arm64 except for the Nvidia Linux 4 Tegra (L4T) running on
// Jetson TX-2. This compiler does not support the offsetof() macro.
#if defined(__aarch64__) && !defined(GOOGLE_L4T)
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int output_shift = params.output_shift;
  // Call kernel optimized for depthwise convolutions using 3x3 filters if
  // parameters are supported.
  if (depthwise_conv::Fast3x3FilterKernelSupported(
          input_shape, filter_shape, stride_width, stride_height,
          dilation_width_factor, dilation_height_factor, pad_width, pad_height,
          depth_multiplier, output_shape, output_shift)) {
    ruy::profiler::ScopeLabel specialized_label("DepthwiseConv/8bit/3x3");
    depthwise_conv::DepthwiseConv3x3Filter<kOutputRounding>(
        params, input_shape, input_data, filter_shape, filter_data, bias_shape,
        bias_data, output_shape, output_data, thread_start, thread_end,
        thread_dim);
    return;
  }
#endif
  // Fallback path for all other filter shapes / architectures.
  ruy::profiler::ScopeLabel specialized_label("DepthwiseConv/8bit/General");
  depthwise_conv::DepthwiseConvGeneral(params, input_shape, input_data,
                                       filter_shape, filter_data, bias_shape,
                                       bias_data, output_shape, output_data,
                                       thread_start, thread_end, thread_dim);
}
inline void LegacyDepthwiseConvImpl(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data, const RuntimeShape& output_shape,
uint8* output_data, int thread_start, int thread_end, int thread_dim) {
return LegacyDepthwiseConvWithRounding<
DepthwiseConvOutputRounding::kAwayFromZero>(
params, input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data, output_shape, output_data, thread_start, thread_end,
thread_dim);
}
// Quantized uint8 depthwise convolution over legacy Dims<4> buffers. Packs
// the flat argument list into a DepthwiseParams struct (converting the
// legacy shift sign convention) and runs the implementation on a single
// thread, split along the row dimension.
//
// Fix: reuse the RuntimeShape already computed for the output instead of
// converting `output_dims` a second time in the call below.
inline void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                          int32 input_offset, const uint8* filter_data,
                          const Dims<4>& filter_dims, int32 filter_offset,
                          const int32* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height,
                          int dilation_width_factor, int dilation_height_factor,
                          int pad_width, int pad_height, int depth_multiplier,
                          int32 output_offset, int32 output_multiplier,
                          int output_shift, int32 output_activation_min,
                          int32 output_activation_max, uint8* output_data,
                          const Dims<4>& output_dims) {
  tflite::DepthwiseParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.depth_multiplier = depth_multiplier;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kDepthwiseReverseShift * output_shift;
  const RuntimeShape output_shape = DimsToShape(output_dims);
  const int output_height = output_shape.Dims(1);
  LegacyDepthwiseConvImpl(
      op_params, DimsToShape(input_dims), input_data, DimsToShape(filter_dims),
      filter_data, DimsToShape(bias_dims), bias_data, output_shape,
      output_data, /*thread_start=*/0,
      /*thread_end=*/output_height, /*thread_dim=*/1);
}
// Undilated uint8 variant: forwards with both dilation factors fixed at 1.
inline void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                          int32 input_offset, const uint8* filter_data,
                          const Dims<4>& filter_dims, int32 filter_offset,
                          const int32* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, int depth_multiplier,
                          int32 output_offset, int32 output_multiplier,
                          int output_shift, int32 output_activation_min,
                          int32 output_activation_max, uint8* output_data,
                          const Dims<4>& output_dims) {
  const int kNoDilation = 1;
  DepthwiseConv(input_data, input_dims, input_offset, filter_data, filter_dims,
                filter_offset, bias_data, bias_dims, stride_width,
                stride_height, kNoDilation, kNoDilation, pad_width, pad_height,
                depth_multiplier, output_offset, output_multiplier,
                output_shift, output_activation_min, output_activation_max,
                output_data, output_dims);
}
// Legacy, for compatibility with old checked-in code.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                   int32 input_offset, const uint8* filter_data,
                   const Dims<4>& filter_dims, int32 filter_offset,
                   const int32* bias_data, const Dims<4>& bias_dims,
                   int stride_width, int stride_height, int pad_width,
                   int pad_height, int depth_multiplier, int32 output_offset,
                   int32 output_multiplier, int output_shift,
                   int32 output_activation_min, int32 output_activation_max,
                   uint8* output_data, const Dims<4>& output_dims) {
  // With no fused activation the bounds must span the full uint8 range.
  constexpr bool kIsNoActivation = (Ac == FusedActivationFunctionType::kNone);
  if (kIsNoActivation) {
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  DepthwiseConv(input_data, input_dims, input_offset, filter_data, filter_dims,
                filter_offset, bias_data, bias_dims, stride_width,
                stride_height, pad_width, pad_height, depth_multiplier,
                output_offset, output_multiplier, output_shift,
                output_activation_min, output_activation_max, output_data,
                output_dims);
}
// Legacy, for compatibility with old checked-in code.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
int32 input_offset, const uint8* filter_data,
const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data, const Dims<4>& bias_dims, int stride,
int pad_width, int pad_height, int depth_multiplier,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
DepthwiseConv<Ac>(input_data, input_dims, input_offset, filter_data,
filter_dims, filter_offset, bias_data, bias_dims, stride,
stride, pad_width, pad_height, depth_multiplier,
output_offset, output_multiplier, output_shift,
output_activation_min, output_activation_max, output_data,
output_dims);
}
// gemmlowp worker task that runs LegacyDepthwiseConvImpl over the output
// sub-range [thread_start, thread_end) along dimension thread_dim. Shapes
// and data buffers are captured by reference/pointer, so they must outlive
// the task's execution.
template <typename T, typename TS>
struct LegacyDepthwiseConvWorkerTask : public gemmlowp::Task {
  LegacyDepthwiseConvWorkerTask(
      const DepthwiseParams& params, const RuntimeShape& input_shape,
      const T* input_data, const RuntimeShape& filter_shape,
      const T* filter_data, const RuntimeShape& bias_shape, const TS* bias_data,
      const RuntimeShape& output_shape, T* output_data, int thread_start,
      int thread_end, int thread_dim)
      : params_(params),
        input_shape_(input_shape),
        input_data_(input_data),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_shape_(output_shape),
        output_data_(output_data),
        thread_start_(thread_start),
        thread_end_(thread_end),
        thread_dim_(thread_dim) {}

  // Invoked by the gemmlowp worker pool.
  void Run() override {
    LegacyDepthwiseConvImpl(params_, input_shape_, input_data_, filter_shape_,
                            filter_data_, bias_shape_, bias_data_,
                            output_shape_, output_data_, thread_start_,
                            thread_end_, thread_dim_);
  }

 private:
  const DepthwiseParams& params_;
  const RuntimeShape& input_shape_;
  const T* input_data_;
  const RuntimeShape& filter_shape_;
  const T* filter_data_;
  const RuntimeShape& bias_shape_;
  const TS* bias_data_;
  const RuntimeShape& output_shape_;
  T* output_data_;
  int thread_start_;
  int thread_end_;
  int thread_dim_;
};
// Heuristic thread count for splitting a depthwise conv along
// `thread_dim` of the output: size each thread's share so it performs at
// least kMinMulPerThread multiplications. May return 0 or exceed the
// dimension size; callers are expected to clamp.
inline int HowManyConvThreads(const RuntimeShape& output_shape,
                              const RuntimeShape& filter_shape,
                              int thread_dim) {
  constexpr int kMinMulPerThread = 8;
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  // Multiplications performed per unit of the split dimension.
  const int muls_per_unit =
      FlatSizeSkipDim(output_shape, thread_dim) * filter_height * filter_width;
  const int min_units_per_thread = kMinMulPerThread / muls_per_unit + 1;
  return output_shape.Dims(thread_dim) / min_units_per_thread;
}
// Multithreaded uint8 depthwise convolution. Splits the work along the
// output batch or row dimension — whichever heuristic yields more threads —
// and runs the pieces on the gemmlowp worker pool.
inline void DepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, gemmlowp::GemmContext* gemmlowp_context = nullptr) {
  ruy::profiler::ScopeLabel label("DepthwiseConv");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int output_batches = output_shape.Dims(0);
  const int output_rows = output_shape.Dims(1);
  int thread_count_batch = HowManyConvThreads(output_shape, filter_shape, 0);
  int thread_count_row = HowManyConvThreads(output_shape, filter_shape, 1);
  // Choose the split (batch vs. row) that admits more parallelism.
  int thread_dim, thread_count, thread_dim_size;
  if (thread_count_batch > thread_count_row) {
    thread_dim = 0;
    thread_dim_size = output_batches;
    thread_count = thread_count_batch;
  } else {
    thread_dim = 1;
    thread_dim_size = output_rows;
    thread_count = thread_count_row;
  }
  // With no context the cap is one thread, so the multithreaded branch
  // below never dereferences a null gemmlowp_context.
  const int max_threads =
      gemmlowp_context ? gemmlowp_context->max_num_threads() : 1;
  thread_count = std::max(1, std::min(thread_count, max_threads));
  if (thread_count == 1) {
    LegacyDepthwiseConvImpl(params, input_shape, input_data, filter_shape,
                            filter_data, bias_shape, bias_data, output_shape,
                            output_data, /*thread_start=*/0,
                            /*thread_end=*/output_rows, /*thread_dim=*/1);
  } else {
    std::vector<gemmlowp::Task*> tasks(thread_count);
    int thread_start = 0;
    for (int i = 0; i < thread_count; ++i) {
      // Divide the remaining range as evenly as possible among the
      // remaining tasks.
      int thread_end =
          thread_start + (thread_dim_size - thread_start) / (thread_count - i);
      tasks[i] = new LegacyDepthwiseConvWorkerTask<uint8, int32>(
          params, input_shape, input_data, filter_shape, filter_data,
          bias_shape, bias_data, output_shape, output_data, thread_start,
          thread_end, thread_dim);
      thread_start = thread_end;
    }
    // The pool runs the tasks and deletes them afterwards.
    gemmlowp_context->workers_pool()->LegacyExecuteAndDestroyTasks(tasks);
  }
}
// gemmlowp worker task wrapping the per-channel quantized depthwise conv
// kernel. Processes the output sub-range [thread_start, thread_end) along
// dimension thread_dim. output_multiplier/output_shift point to
// per-output-channel requantization parameters. Shapes and buffers are
// captured by reference/pointer and must outlive the task's execution.
template <typename T, typename TS>
struct LegacyPerChannelDepthwiseConvWorkerTask : public gemmlowp::Task {
  LegacyPerChannelDepthwiseConvWorkerTask(
      const DepthwiseParams& params, const int32* output_multiplier,
      const int32* output_shift, const RuntimeShape& input_shape,
      const T* input_data, const RuntimeShape& filter_shape,
      const T* filter_data, const RuntimeShape& bias_shape, const TS* bias_data,
      const RuntimeShape& output_shape, T* output_data, int thread_start,
      int thread_end, int thread_dim)
      : params_(params),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        input_shape_(input_shape),
        input_data_(input_data),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_shape_(output_shape),
        output_data_(output_data),
        thread_start_(thread_start),
        thread_end_(thread_end),
        thread_dim_(thread_dim) {}

  // Invoked by the gemmlowp worker pool. Each task constructs its own
  // CpuBackendContext for the kernel call.
  void Run() override {
    CpuBackendContext backend_context;
    optimized_integer_ops::DepthwiseConvImpl(
        params_, output_multiplier_, output_shift_, input_shape_, input_data_,
        filter_shape_, filter_data_, bias_shape_, bias_data_, output_shape_,
        output_data_, thread_start_, thread_end_, thread_dim_, backend_context);
  }

 private:
  const DepthwiseParams& params_;
  const int32* output_multiplier_;
  const int32* output_shift_;
  const RuntimeShape& input_shape_;
  const T* input_data_;
  const RuntimeShape& filter_shape_;
  const T* filter_data_;
  const RuntimeShape& bias_shape_;
  const TS* bias_data_;
  const RuntimeShape& output_shape_;
  T* output_data_;
  int thread_start_;
  int thread_end_;
  int thread_dim_;
};
// Per-channel quantized int8 depthwise convolution, optionally multithreaded
// via the gemmlowp worker pool.
//
// output_multiplier/output_shift are per-output-channel requantization
// parameters forwarded to optimized_integer_ops::DepthwiseConvImpl. When
// gemmlowp_context is null the computation runs single-threaded on the
// calling thread.
inline void DepthwiseConvPerChannel(
    const DepthwiseParams& params, const int32* output_multiplier,
    const int32* output_shift, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    gemmlowp::GemmContext* gemmlowp_context = nullptr) {
  ruy::profiler::ScopeLabel label("DepthwiseConvInt8");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  // Choose the parallelization axis: batch (dim 0) or output rows (dim 1),
  // whichever admits more worker threads.
  const int batch_thread_count =
      HowManyConvThreads(output_shape, filter_shape, 0);
  const int row_thread_count = HowManyConvThreads(output_shape, filter_shape, 1);
  const bool split_by_batch = batch_thread_count > row_thread_count;
  const int thread_dim = split_by_batch ? 0 : 1;
  const int thread_dim_size =
      split_by_batch ? output_shape.Dims(0) : output_shape.Dims(1);
  int thread_count =
      split_by_batch ? batch_thread_count : row_thread_count;
  // A null context means at most one thread.
  const int max_threads =
      gemmlowp_context ? gemmlowp_context->max_num_threads() : 1;
  thread_count = std::max(1, std::min(thread_count, max_threads));
  if (thread_count == 1) {
    // Single-threaded path: process every output row inline.
    CpuBackendContext backend_context;
    optimized_integer_ops::DepthwiseConvImpl(
        params, output_multiplier, output_shift, input_shape, input_data,
        filter_shape, filter_data, bias_shape, bias_data, output_shape,
        output_data, /*thread_start=*/0,
        /*thread_end=*/output_shape.Dims(1), /*thread_dim=*/1, backend_context);
    return;
  }
  // Multithreaded path: partition [0, thread_dim_size) into contiguous
  // slices, distributing the remainder evenly, and run one task per slice.
  std::vector<gemmlowp::Task*> tasks(thread_count);
  int slice_begin = 0;
  for (int i = 0; i < thread_count; ++i) {
    const int slice_end =
        slice_begin + (thread_dim_size - slice_begin) / (thread_count - i);
    tasks[i] = new LegacyPerChannelDepthwiseConvWorkerTask<int8, int32>(
        params, output_multiplier, output_shift, input_shape, input_data,
        filter_shape, filter_data, bias_shape, bias_data, output_shape,
        output_data, slice_begin, slice_end, thread_dim);
    slice_begin = slice_end;
  }
  // Blocks until all tasks finish, then deletes them.
  gemmlowp_context->workers_pool()->LegacyExecuteAndDestroyTasks(tasks);
}
// Float depthwise convolution, single-threaded legacy entry point: forwards
// to DepthwiseConvImpl over the full output row range (dim 1) with
// CPU-feature flags detected here.
inline void DepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data) {
  DepthwiseConvImpl(params, input_shape, input_data, filter_shape, filter_data,
                    bias_shape, bias_data, output_shape, output_data,
                    CpuFlags(),
                    /*thread_start=*/0,
                    /*thread_end=*/output_shape.Dims(1), /*thread_dim=*/1);
}
// Legacy Dims<4> overload: converts the dims to RuntimeShape and forwards to
// the shape-based AddBiasAndEvalActivationFunction. Note the argument order
// differs — the activation min/max move to the front in the new API.
inline void AddBiasAndEvalActivationFunction(const float* bias_data,
                                             const Dims<4>& bias_dims,
                                             float* array_data,
                                             const Dims<4>& array_dims,
                                             float output_activation_min,
                                             float output_activation_max) {
  AddBiasAndEvalActivationFunction(output_activation_min, output_activation_max,
                                   DimsToShape(bias_dims), bias_data,
                                   DimsToShape(array_dims), array_data);
}
// legacy, for compatibility with old checked-in code
//
// Template-activation variant: resolves the compile-time fused activation
// kind Ac to runtime min/max clamp bounds and forwards to the runtime-bounds
// overload above.
template <FusedActivationFunctionType Ac>
void AddBiasAndEvalActivationFunction(const float* bias_data,
                                      const Dims<4>& bias_dims,
                                      float* array_data,
                                      const Dims<4>& array_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  AddBiasAndEvalActivationFunction(bias_data, bias_dims, array_data, array_dims,
                                   output_activation_min,
                                   output_activation_max);
}
// Computes *result = lhs * rhs with Eigen, routing the single-column case
// through a matrix*vector (GEMV) product so it is labeled separately in
// profiles. noalias() tells Eigen the destination does not overlap the
// operands, avoiding a temporary.
template <typename Lhs, typename Rhs, typename Result>
void Gemm(const Eigen::MatrixBase<Lhs>& lhs, const Eigen::MatrixBase<Rhs>& rhs,
          Eigen::MatrixBase<Result>* result) {
  const bool single_column_rhs = (rhs.cols() == 1);
  if (!single_column_rhs) {
    ruy::profiler::ScopeLabel label("GEMM");
    result->noalias() = lhs * rhs;
  } else {
    ruy::profiler::ScopeLabel label("GEMV");
    result->col(0).noalias() = lhs * rhs.col(0);
  }
}
// Float fully-connected layer: output = act(weights * input + bias), computed
// as an Eigen GEMM followed by fused bias-add/activation.
//
// If optional_bias_data is null, only the activation clamp is applied to the
// raw GEMM result.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& weights_shape,
    const float* weights_data, const RuntimeShape& bias_shape,
    const float* optional_bias_data, const RuntimeShape& output_shape,
    float* output_data) {
  ruy::profiler::ScopeLabel label("FullyConnected");
  const float act_min = params.float_activation_min;
  const float act_max = params.float_activation_max;
  // TODO(b/62193649): The accumulation depth is taken from the weights shape
  // (instead of mapping the input with its own first dimension as rows)
  // because the --variable_batch hack overwrites the input's 3rd dimension
  // with the runtime batch size, so the input shape alone cannot be trusted.
  const int weights_dims = weights_shape.DimensionsCount();
  const int accum_depth = weights_shape.Dims(weights_dims - 1);
  const auto input_matrix =
      MapAsMatrixWithGivenNumberOfRows(input_data, input_shape, accum_depth);
  const auto weights_matrix =
      MapAsMatrixWithLastDimAsRows(weights_data, weights_shape);
  auto output_matrix = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  Gemm(weights_matrix.transpose(), input_matrix, &output_matrix);
  if (optional_bias_data == nullptr) {
    // No bias: clamp the GEMM output in place with the fused activation.
    const int flat_size = output_shape.FlatSize();
    for (int i = 0; i < flat_size; ++i) {
      output_data[i] =
          ActivationFunctionWithMinMax(output_data[i], act_min, act_max);
    }
  } else {
    AddBiasAndEvalActivationFunction(act_min, act_max, bias_shape,
                                     optional_bias_data, output_shape,
                                     output_data);
  }
}
inline void FullyConnected(const float* input_data, const Dims<4>& input_dims,
const float* weights_data,
const Dims<4>& weights_dims, const float* bias_data,
const Dims<4>& bias_dims,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
tflite::FullyConnectedParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
FullyConnected(op_params, DimsToShape(input_dims), input_data,
DimsToShape(weights_dims), weights_data,
DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
output_data);
}
// legacy, for compatibility with old checked-in code
//
// Template-activation variant: resolves the compile-time activation kind Ac
// to runtime clamp bounds and forwards to the runtime-bounds overload above.
template <FusedActivationFunctionType Ac>
void FullyConnected(const float* input_data, const Dims<4>& input_dims,
                    const float* weights_data, const Dims<4>& weights_dims,
                    const float* bias_data, const Dims<4>& bias_dims,
                    float* output_data, const Dims<4>& output_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  FullyConnected(input_data, input_dims, weights_data, weights_dims, bias_data,
                 bias_dims, output_activation_min, output_activation_max,
                 output_data, output_dims);
}
struct GemmlowpOutputPipeline {
typedef gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>
ColVectorMap;
typedef std::tuple<gemmlowp::OutputStageBiasAddition<ColVectorMap>,
gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent,
gemmlowp::OutputStageClamp,
gemmlowp::OutputStageSaturatingCastToUint8>
Pipeline;
static Pipeline MakeExp(const int32* bias_data, int output_rows,
int32 output_offset, int32 output_multiplier,
int output_left_shift, int32 output_activation_min,
int32 output_activation_max) {
ColVectorMap bias_vector(bias_data, output_rows);
gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
bias_addition_stage.bias_vector = bias_vector;
gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent quantize_down_stage;
quantize_down_stage.result_offset_after_shift = output_offset;
quantize_down_stage.result_fixedpoint_multiplier = output_multiplier;
quantize_down_stage.result_exponent = output_left_shift;
gemmlowp::OutputStageClamp clamp_stage;
clamp_stage.min = output_activation_min;
clamp_stage.max = output_activation_max;
gemmlowp::OutputStageSaturatingCastToUint8 saturating_cast_stage;
return std::make_tuple(bias_addition_stage, quantize_down_stage,
clamp_stage, saturating_cast_stage);
}
};
struct GemmlowpOutputPipelineInt8 {
typedef gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>
ColVectorMap;
typedef std::tuple<gemmlowp::OutputStageBiasAddition<ColVectorMap>,
gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent,
gemmlowp::OutputStageClamp,
gemmlowp::OutputStageSaturatingCastToInt8>
Pipeline;
static Pipeline MakeExp(const int32* bias_data, int output_rows,
int32 output_offset, int32 output_multiplier,
int output_left_shift, int32 output_activation_min,
int32 output_activation_max) {
ColVectorMap bias_vector(bias_data, output_rows);
gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
bias_addition_stage.bias_vector = bias_vector;
gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent quantize_down_stage;
quantize_down_stage.result_offset_after_shift = output_offset;
quantize_down_stage.result_fixedpoint_multiplier = output_multiplier;
quantize_down_stage.result_exponent = output_left_shift;
gemmlowp::OutputStageClamp clamp_stage;
clamp_stage.min = output_activation_min;
clamp_stage.max = output_activation_max;
gemmlowp::OutputStageSaturatingCastToInt8 saturating_cast_stage;
return std::make_tuple(bias_addition_stage, quantize_down_stage,
clamp_stage, saturating_cast_stage);
}
};
#ifdef USE_NEON
// NEON worker computing output rows [row_start, row_end) of a batch-1 uint8
// fully-connected layer as a matrix*vector product (GEMV).
//
// Processes kPeel = 4 output rows per outer iteration; requires
// row_end - row_start >= kPeel (DCHECKed below). Depth is consumed 16 values
// at a time, then 8, with a scalar tail. Accumulation is int32; results are
// requantized with (output_multiplier, output_shift), offset, clamped to
// [output_activation_min, output_activation_max] and stored as uint8.
inline void LegacyFullyConnectedAsGEMVWorkerImpl(
    const RuntimeShape& input_shape, const uint8* input_data,
    int32 input_offset, const RuntimeShape& filter_shape,
    const uint8* filter_data, int32 filter_offset,
    const RuntimeShape& bias_shape, const int32* bias_data, int32 output_offset,
    int32 output_multiplier, int output_shift, int32 output_activation_min,
    int32 output_activation_max, const RuntimeShape& output_shape,
    uint8* output_data, int row_start, int row_end) {
  ruy::profiler::ScopeLabel label("FullyConnectedAsGEMV/8bit");
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  const int output_dim_count = output_shape.DimensionsCount();
  // GEMV formulation only works for batch size 1.
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(output_shape, output_dim_count - 1), 1);
  const int input_size = FlatSizeSkipDim(input_shape, 0);
  static constexpr int kPeel = 4;
  // Positive output_shift: multiply by 2^shift before the fixed-point
  // multiplier; otherwise rounding-shift-right by -shift after it.
  const bool shift_left = (output_shift > 0);
  // Prefetch the input vector and the first kPeel filter rows into L1
  // (64-byte cache-line stride).
  for (int k = 0; k < input_size; k += 64) {
    optimized_ops_preload_l1_stream(input_data + k);
  }
  for (int k = 0; k < kPeel * input_size; k += 64) {
    optimized_ops_preload_l1_stream(filter_data + k);
  }
  TFLITE_DCHECK_GE(row_end - row_start, kPeel);
  for (int out = row_start; out < row_end; out += kPeel) {
    // Clamp so the last iteration stays in range; it may recompute rows that
    // overlap the previous iteration, which is harmless since each output
    // row is computed independently.
    out = std::min(out, row_end - kPeel);
    // One int32x4 accumulator per peeled output row.
    int32x4_t acc0 = vdupq_n_s32(0);
    int32x4_t acc1 = acc0;
    int32x4_t acc2 = acc0;
    int32x4_t acc3 = acc0;
    // Zero-point offsets are added in 16-bit after widening from uint8.
    const int16x8_t input_offset_vec = vdupq_n_s16(input_offset);
    const int16x8_t filter_offset_vec = vdupq_n_s16(filter_offset);
    int in = 0;
    // Main loop: 16 depth values per iteration across 4 filter rows.
    for (; in <= input_size - 16; in += 16) {
      const uint8x16_t input_val_u8 = vld1q_u8(input_data + in);
      const uint8* filter_ptr = filter_data + in + out * input_size;
      uint8x16_t filter_val_u8_0 = vld1q_u8(filter_ptr);
      optimized_ops_preload_l1_stream(filter_ptr + 64);
      filter_ptr += input_size;
      uint8x16_t filter_val_u8_1 = vld1q_u8(filter_ptr);
      optimized_ops_preload_l1_stream(filter_ptr + 64);
      filter_ptr += input_size;
      uint8x16_t filter_val_u8_2 = vld1q_u8(filter_ptr);
      optimized_ops_preload_l1_stream(filter_ptr + 64);
      filter_ptr += input_size;
      uint8x16_t filter_val_u8_3 = vld1q_u8(filter_ptr);
      optimized_ops_preload_l1_stream(filter_ptr + 64);
      // Widen input u8 -> s16 and add the input zero-point offset.
      int16x8_t input_val_0, input_val_1;
      uint8x8_t low = vget_low_u8(input_val_u8);
      uint8x8_t high = vget_high_u8(input_val_u8);
      input_val_0 = vreinterpretq_s16_u16(vmovl_u8(low));
      input_val_1 = vreinterpretq_s16_u16(vmovl_u8(high));
      input_val_0 = vaddq_s16(input_val_0, input_offset_vec);
      input_val_1 = vaddq_s16(input_val_1, input_offset_vec);
      // Widen each filter row u8 -> s16 and add the filter zero-point.
      low = vget_low_u8(filter_val_u8_0);
      high = vget_high_u8(filter_val_u8_0);
      int16x8_t filter_val_0_0 = vreinterpretq_s16_u16(vmovl_u8(low));
      int16x8_t filter_val_0_1 = vreinterpretq_s16_u16(vmovl_u8(high));
      filter_val_0_0 = vaddq_s16(filter_val_0_0, filter_offset_vec);
      filter_val_0_1 = vaddq_s16(filter_val_0_1, filter_offset_vec);
      low = vget_low_u8(filter_val_u8_1);
      high = vget_high_u8(filter_val_u8_1);
      int16x8_t filter_val_1_0 = vreinterpretq_s16_u16(vmovl_u8(low));
      int16x8_t filter_val_1_1 = vreinterpretq_s16_u16(vmovl_u8(high));
      filter_val_1_0 = vaddq_s16(filter_val_1_0, filter_offset_vec);
      filter_val_1_1 = vaddq_s16(filter_val_1_1, filter_offset_vec);
      low = vget_low_u8(filter_val_u8_2);
      high = vget_high_u8(filter_val_u8_2);
      int16x8_t filter_val_2_0 = vreinterpretq_s16_u16(vmovl_u8(low));
      int16x8_t filter_val_2_1 = vreinterpretq_s16_u16(vmovl_u8(high));
      filter_val_2_0 = vaddq_s16(filter_val_2_0, filter_offset_vec);
      filter_val_2_1 = vaddq_s16(filter_val_2_1, filter_offset_vec);
      low = vget_low_u8(filter_val_u8_3);
      high = vget_high_u8(filter_val_u8_3);
      int16x8_t filter_val_3_0 = vreinterpretq_s16_u16(vmovl_u8(low));
      int16x8_t filter_val_3_1 = vreinterpretq_s16_u16(vmovl_u8(high));
      filter_val_3_0 = vaddq_s16(filter_val_3_0, filter_offset_vec);
      filter_val_3_1 = vaddq_s16(filter_val_3_1, filter_offset_vec);
      // Multiply-accumulate: s16 x s16 -> s32, 4 lanes at a time, covering
      // all four 4-lane halves of the 16 loaded depth values.
      acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0_0),
                       vget_low_s16(input_val_0));
      acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1_0),
                       vget_low_s16(input_val_0));
      acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2_0),
                       vget_low_s16(input_val_0));
      acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3_0),
                       vget_low_s16(input_val_0));
      acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0_1),
                       vget_low_s16(input_val_1));
      acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1_1),
                       vget_low_s16(input_val_1));
      acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2_1),
                       vget_low_s16(input_val_1));
      acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3_1),
                       vget_low_s16(input_val_1));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0_0),
                       vget_high_s16(input_val_0));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1_0),
                       vget_high_s16(input_val_0));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2_0),
                       vget_high_s16(input_val_0));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3_0),
                       vget_high_s16(input_val_0));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0_1),
                       vget_high_s16(input_val_1));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1_1),
                       vget_high_s16(input_val_1));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2_1),
                       vget_high_s16(input_val_1));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3_1),
                       vget_high_s16(input_val_1));
    }
    // Secondary loop: 8 depth values per iteration.
    for (; in <= input_size - 8; in += 8) {
      const uint8x8_t input_val_u8 = vld1_u8(input_data + in);
      const uint8* filter_ptr = filter_data + in + out * input_size;
      uint8x8_t filter_val_u8_0 = vld1_u8(filter_ptr);
      filter_ptr += input_size;
      uint8x8_t filter_val_u8_1 = vld1_u8(filter_ptr);
      filter_ptr += input_size;
      uint8x8_t filter_val_u8_2 = vld1_u8(filter_ptr);
      filter_ptr += input_size;
      uint8x8_t filter_val_u8_3 = vld1_u8(filter_ptr);
      int16x8_t input_val = vreinterpretq_s16_u16(vmovl_u8(input_val_u8));
      input_val = vaddq_s16(input_val, input_offset_vec);
      int16x8_t filter_val_0 = vreinterpretq_s16_u16(vmovl_u8(filter_val_u8_0));
      filter_val_0 = vaddq_s16(filter_val_0, filter_offset_vec);
      int16x8_t filter_val_1 = vreinterpretq_s16_u16(vmovl_u8(filter_val_u8_1));
      filter_val_1 = vaddq_s16(filter_val_1, filter_offset_vec);
      int16x8_t filter_val_2 = vreinterpretq_s16_u16(vmovl_u8(filter_val_u8_2));
      filter_val_2 = vaddq_s16(filter_val_2, filter_offset_vec);
      int16x8_t filter_val_3 = vreinterpretq_s16_u16(vmovl_u8(filter_val_u8_3));
      filter_val_3 = vaddq_s16(filter_val_3, filter_offset_vec);
      acc0 =
          vmlal_s16(acc0, vget_low_s16(filter_val_0), vget_low_s16(input_val));
      acc1 =
          vmlal_s16(acc1, vget_low_s16(filter_val_1), vget_low_s16(input_val));
      acc2 =
          vmlal_s16(acc2, vget_low_s16(filter_val_2), vget_low_s16(input_val));
      acc3 =
          vmlal_s16(acc3, vget_low_s16(filter_val_3), vget_low_s16(input_val));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0),
                       vget_high_s16(input_val));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1),
                       vget_high_s16(input_val));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2),
                       vget_high_s16(input_val));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3),
                       vget_high_s16(input_val));
    }
    // Scalar tail (fewer than 8 depth values left): spill the vector
    // accumulators to a stack buffer, fold each remaining product into the
    // lane index the vector loop would have used, then reload.
    if (in < input_size) {
      int32 buf[16];
      vst1q_s32(buf + 0, acc0);
      vst1q_s32(buf + 4, acc1);
      vst1q_s32(buf + 8, acc2);
      vst1q_s32(buf + 12, acc3);
      for (; in < input_size; in++) {
        int lane = (in + 8 - input_size) % 4;
        const int32 input_val = input_data[in] + input_offset;
        for (int k = 0; k < kPeel; k++) {
          int32 filter_val =
              filter_data[in + (out + k) * input_size] + filter_offset;
          buf[lane + 4 * k] += filter_val * input_val;
        }
      }
      acc0 = vld1q_s32(buf + 0);
      acc1 = vld1q_s32(buf + 4);
      acc2 = vld1q_s32(buf + 8);
      acc3 = vld1q_s32(buf + 12);
    }
    // Horizontally reduce accumulators
    int32x2_t pairwise_reduced_acc_0 =
        vpadd_s32(vget_low_s32(acc0), vget_high_s32(acc0));
    int32x2_t pairwise_reduced_acc_1 =
        vpadd_s32(vget_low_s32(acc1), vget_high_s32(acc1));
    int32x2_t pairwise_reduced_acc_2 =
        vpadd_s32(vget_low_s32(acc2), vget_high_s32(acc2));
    int32x2_t pairwise_reduced_acc_3 =
        vpadd_s32(vget_low_s32(acc3), vget_high_s32(acc3));
    const int32x2_t reduced_lo =
        vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
    const int32x2_t reduced_hi =
        vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
    // `reduced` now holds one int32 sum per peeled output row.
    int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
    // Add bias values.
    int32x4_t bias_vec = vld1q_s32(bias_data + out);
    reduced = vaddq_s32(reduced, bias_vec);
    // Requantize: fixed-point multiply plus power-of-two scaling, direction
    // chosen by the sign of output_shift (see `shift_left` above).
    if (shift_left) {
      const int32 multiplier_power_of_two = 1 << output_shift;
      reduced = vmulq_n_s32(reduced, multiplier_power_of_two);
      reduced = vqrdmulhq_n_s32(reduced, output_multiplier);
    } else {
      // Multiply by the fixed-point multiplier.
      reduced = vqrdmulhq_n_s32(reduced, output_multiplier);
      // Rounding-shift-right.
      using gemmlowp::RoundingDivideByPOT;
      reduced = RoundingDivideByPOT(reduced, -output_shift);
    }
    // Add the output offset.
    const int32x4_t output_offset_vec = vdupq_n_s32(output_offset);
    reduced = vaddq_s32(reduced, output_offset_vec);
    // Narrow values down to 16 bit signed.
    const int16x4_t res16 = vqmovn_s32(reduced);
    // Narrow values down to 8 bit unsigned, saturating.
    uint8x8_t res8 = vqmovun_s16(vcombine_s16(res16, res16));
    // Apply the clamping from the activation function
    res8 = vmax_u8(res8, vdup_n_u8(output_activation_min));
    res8 = vmin_u8(res8, vdup_n_u8(output_activation_max));
    // Store results to destination.
    vst1_lane_u8(output_data + out + 0, res8, 0);
    vst1_lane_u8(output_data + out + 1, res8, 1);
    vst1_lane_u8(output_data + out + 2, res8, 2);
    vst1_lane_u8(output_data + out + 3, res8, 3);
  }
}
// gemmlowp task that runs one row slice [row_start_, row_end_) of the uint8
// GEMV via LegacyFullyConnectedAsGEMVWorkerImpl.
//
// NOTE(review): shape members are held by reference; the caller must keep
// them alive until the worker pool has finished (FullyConnectedAsGEMV below
// blocks on LegacyExecuteAndDestroyTasks, so this holds there).
struct LegacyFullyConnectedAsGEMVWorkerTask : public gemmlowp::Task {
  LegacyFullyConnectedAsGEMVWorkerTask(
      const RuntimeShape& input_shape, const uint8* input_data,
      int32 input_offset, const RuntimeShape& filter_shape,
      const uint8* filter_data, int32 filter_offset,
      const RuntimeShape& bias_shape, const int32* bias_data,
      int32 output_offset, int32 output_multiplier, int output_shift,
      int32 output_activation_min, int32 output_activation_max,
      const RuntimeShape& output_shape, uint8* output_data, int row_start,
      int row_end)
      : input_shape_(input_shape),
        input_data_(input_data),
        input_offset_(input_offset),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        filter_offset_(filter_offset),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_offset_(output_offset),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        output_activation_min_(output_activation_min),
        output_activation_max_(output_activation_max),
        output_shape_(output_shape),
        output_data_(output_data),
        row_start_(row_start),
        row_end_(row_end) {}

  // Computes the assigned output rows; independent of all other tasks.
  void Run() override {
    LegacyFullyConnectedAsGEMVWorkerImpl(
        input_shape_, input_data_, input_offset_, filter_shape_, filter_data_,
        filter_offset_, bias_shape_, bias_data_, output_offset_,
        output_multiplier_, output_shift_, output_activation_min_,
        output_activation_max_, output_shape_, output_data_, row_start_,
        row_end_);
  }

  // Borrowed state (owned by the caller); see lifetime note above.
  const RuntimeShape& input_shape_;
  const uint8* input_data_;
  int32 input_offset_;   // input zero-point offset
  const RuntimeShape& filter_shape_;
  const uint8* filter_data_;
  int32 filter_offset_;  // filter zero-point offset
  const RuntimeShape& bias_shape_;
  const int32* bias_data_;
  int32 output_offset_;
  int32 output_multiplier_;  // fixed-point requantization multiplier
  int output_shift_;         // signed requantization exponent
  int32 output_activation_min_;
  int32 output_activation_max_;
  const RuntimeShape& output_shape_;
  uint8* output_data_;
  // Half-open row slice handled by this task.
  int row_start_;
  int row_end_;
};
// Multithreaded driver for the batch-1 uint8 fully-connected GEMV: picks a
// thread count via gemmlowp's heuristic, then either runs the worker inline
// or shards the output rows across the gemmlowp thread pool.
inline void FullyConnectedAsGEMV(
    const RuntimeShape& input_shape, const uint8* input_data,
    int32 input_offset, const RuntimeShape& filter_shape,
    const uint8* filter_data, int32 filter_offset,
    const RuntimeShape& bias_shape, const int32* bias_data, int32 output_offset,
    int32 output_multiplier, int output_shift, int32 output_activation_min,
    int32 output_activation_max, const RuntimeShape& output_shape,
    uint8* output_data, gemmlowp::GemmContext* gemmlowp_context) {
  const int output_dim_count = output_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_rows = output_shape.Dims(output_dim_count - 1);
  const int input_size = FlatSizeSkipDim(input_shape, 0);
  static constexpr int kKernelRows = 4;
  const int thread_count = gemmlowp::HowManyThreads<kKernelRows>(
      gemmlowp_context->max_num_threads(), output_rows, batches, input_size);
  if (thread_count == 1) {
    // Single-thread case: do the computation on the current thread, don't
    // use a threadpool
    LegacyFullyConnectedAsGEMVWorkerImpl(
        input_shape, input_data, input_offset, filter_shape, filter_data,
        filter_offset, bias_shape, bias_data, output_offset, output_multiplier,
        output_shift, output_activation_min, output_activation_max,
        output_shape, output_data, 0, output_rows);
    return;
  }
  // Multi-threaded case: use the gemmlowp context's threadpool.
  TFLITE_DCHECK_GT(thread_count, 1);
  std::vector<gemmlowp::Task*> tasks(thread_count);
  // Round each worker's share up to a multiple of kKernelRows so every slice
  // satisfies the worker's row_end - row_start >= kPeel precondition; the
  // final slices are clamped to output_rows.
  const int kRowsPerWorker = gemmlowp::RoundUp<kKernelRows>(
      gemmlowp::CeilQuotient(output_rows, thread_count));
  int row_start = 0;
  for (int i = 0; i < thread_count; ++i) {
    int row_end = std::min(output_rows, row_start + kRowsPerWorker);
    tasks[i] = new LegacyFullyConnectedAsGEMVWorkerTask(
        input_shape, input_data, input_offset, filter_shape, filter_data,
        filter_offset, bias_shape, bias_data, output_offset, output_multiplier,
        output_shift, output_activation_min, output_activation_max,
        output_shape, output_data, row_start, row_end);
    row_start = row_end;
  }
  TFLITE_DCHECK_EQ(row_start, output_rows);
  // Blocks until all tasks finish, then deletes them.
  gemmlowp_context->workers_pool()->LegacyExecuteAndDestroyTasks(tasks);
}
#endif // USE_NEON
// uint8 quantized fully-connected layer using gemmlowp.
//
// For batch size 1 with at least 4 output rows, dispatches to the NEON GEMV
// fast path when available; otherwise runs a gemmlowp GEMM whose output
// pipeline performs bias add, requantization, clamping and the cast back to
// uint8 (see GemmlowpOutputPipeline::MakeExp).
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label("FullyConnected/8bit");
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
#ifdef USE_NEON
  if (batches == 1) {
    const int output_size = MatchingDim(filter_shape, filter_dim_count - 2,
                                        output_shape, output_dim_count - 1);
    // GEMV fast path needs >= 4 output rows (its kPeel requirement).
    if (output_size >= 4) {
      return FullyConnectedAsGEMV(
          input_shape, input_data, input_offset, filter_shape, filter_data,
          filter_offset, bias_shape, bias_data, output_offset,
          output_multiplier, output_shift, output_activation_min,
          output_activation_max, output_shape, output_data, gemmlowp_context);
    }
  }
#endif  // USE_NEON
  const int filter_rows = filter_shape.Dims(filter_dim_count - 2);
  const int filter_cols = filter_shape.Dims(filter_dim_count - 1);
  TFLITE_DCHECK_EQ(filter_shape.FlatSize(), filter_rows * filter_cols);
  const int output_rows = output_shape.Dims(output_dim_count - 1);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  // Map the flat buffers as gemmlowp matrices: filter is (output_rows x
  // filter_cols) row-major; input and output are column-major with one
  // column per batch element.
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::RowMajor> filter_matrix(
      filter_data, output_rows, filter_cols, filter_cols);
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::ColMajor> input_matrix(
      input_data, filter_cols, batches, filter_cols);
  gemmlowp::MatrixMap<uint8, gemmlowp::MapOrder::ColMajor> output_matrix(
      output_data, output_rows, batches, output_rows);
  const auto& output_pipeline = GemmlowpOutputPipeline::MakeExp(
      bias_data, output_rows, output_offset, output_multiplier, output_shift,
      output_activation_min, output_activation_max);
  gemmlowp::GemmWithOutputPipeline<uint8, uint8,
                                   gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
      gemmlowp_context, filter_matrix, input_matrix, &output_matrix,
      filter_offset, input_offset, output_pipeline);
}
#ifdef GEMMLOWP_NEON
// In the common case of batch size 1, a fully-connected node degenerates
// to a matrix*vector product. LSTM cells contain a fully-connected node;
// when quantized, this becomes a special type of GEMV operation where
// the output is 16bit-quantized, thus needs its own special path.
inline void GEMVForLstmCell(const RuntimeShape& input_shape,
const uint8* input_data,
const RuntimeShape& weights_shape,
const uint8* weights_data, uint8 weights_zero_point,
const RuntimeShape& bias_shape,
const int32* bias_data, int32 accum_multiplier,
int accum_shift, const RuntimeShape& output_shape,
int16* output_data) {
ruy::profiler::ScopeLabel label("GEMVForLstmCell");
TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
const int output_dim_count = output_shape.DimensionsCount();
const int weights_dim_count = weights_shape.DimensionsCount();
TFLITE_DCHECK_EQ(FlatSizeSkipDim(output_shape, output_dim_count - 1), 1);
const int input_size = FlatSizeSkipDim(input_shape, 0);
const int output_size = MatchingDim(weights_shape, weights_dim_count - 2,
output_shape, output_dim_count - 1);
// This special fast path for quantized LSTM cells does not try to support
// odd sizes that we haven't encountered in any LSTM cell, that would
// require special code (that would go untested until any LSTM cell
// exercises it). We just guard our assumptions about size evenness with
// the following assertions.
TFLITE_DCHECK(!(output_size % 4));
TFLITE_DCHECK(!(input_size % 8));
const int32* bias_ptr = bias_data;
int16* output_ptr = output_data;
for (int out = 0; out < output_size; out += 4) {
int32x4_t acc_0 = vdupq_n_s32(0);
int32x4_t acc_1 = vdupq_n_s32(0);
int32x4_t acc_2 = vdupq_n_s32(0);
int32x4_t acc_3 = vdupq_n_s32(0);
const int16x8_t input_offset_vec = vdupq_n_s16(-128);
const int16x8_t weights_offset_vec = vdupq_n_s16(-weights_zero_point);
int in = 0;
// Handle 16 levels of depth at a time.
for (; in <= input_size - 16; in += 16) {
const uint8x16_t input_val_u8 = vld1q_u8(input_data + in);
const uint8* weights_ptr = weights_data + in + out * input_size;
uint8x16_t weights_val_u8_0 = vld1q_u8(weights_ptr + 0 * input_size);
uint8x16_t weights_val_u8_1 = vld1q_u8(weights_ptr + 1 * input_size);
uint8x16_t weights_val_u8_2 = vld1q_u8(weights_ptr + 2 * input_size);
uint8x16_t weights_val_u8_3 = vld1q_u8(weights_ptr + 3 * input_size);
int16x8_t input_val_0, input_val_1;
const uint8x8_t low = vget_low_u8(input_val_u8);
const uint8x8_t high = vget_high_u8(input_val_u8);
input_val_0 = vreinterpretq_s16_u16(vmovl_u8(low));
input_val_1 = vreinterpretq_s16_u16(vmovl_u8(high));
input_val_0 = vaddq_s16(input_val_0, input_offset_vec);
input_val_1 = vaddq_s16(input_val_1, input_offset_vec);
int16x8_t weights_val_0_0, weights_val_1_0, weights_val_2_0,
weights_val_3_0;
int16x8_t weights_val_0_1, weights_val_1_1, weights_val_2_1,
weights_val_3_1;
weights_val_0_0 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(weights_val_u8_0))),
weights_offset_vec);
weights_val_0_1 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(weights_val_u8_0))),
weights_offset_vec);
weights_val_1_0 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(weights_val_u8_1))),
weights_offset_vec);
weights_val_1_1 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(weights_val_u8_1))),
weights_offset_vec);
weights_val_2_0 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(weights_val_u8_2))),
weights_offset_vec);
weights_val_2_1 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(weights_val_u8_2))),
weights_offset_vec);
weights_val_3_0 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(weights_val_u8_3))),
weights_offset_vec);
weights_val_3_1 = vaddq_s16(
vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(weights_val_u8_3))),
weights_offset_vec);
acc_0 = vmlal_s16(acc_0, vget_low_s16(weights_val_0_0),
vget_low_s16(input_val_0));
acc_1 = vmlal_s16(acc_1, vget_low_s16(weights_val_1_0),
vget_low_s16(input_val_0));
acc_2 = vmlal_s16(acc_2, vget_low_s16(weights_val_2_0),
vget_low_s16(input_val_0));
acc_3 = vmlal_s16(acc_3, vget_low_s16(weights_val_3_0),
vget_low_s16(input_val_0));
acc_0 = vmlal_s16(acc_0, vget_high_s16(weights_val_0_0),
vget_high_s16(input_val_0));
acc_1 = vmlal_s16(acc_1, vget_high_s16(weights_val_1_0),
vget_high_s16(input_val_0));
acc_2 = vmlal_s16(acc_2, vget_high_s16(weights_val_2_0),
vget_high_s16(input_val_0));
acc_3 = vmlal_s16(acc_3, vget_high_s16(weights_val_3_0),
vget_high_s16(input_val_0));
acc_0 = vmlal_s16(acc_0, vget_low_s16(weights_val_0_1),
vget_low_s16(input_val_1));
acc_1 = vmlal_s16(acc_1, vget_low_s16(weights_val_1_1),
vget_low_s16(input_val_1));
acc_2 = vmlal_s16(acc_2, vget_low_s16(weights_val_2_1),
vget_low_s16(input_val_1));
acc_3 = vmlal_s16(acc_3, vget_low_s16(weights_val_3_1),
vget_low_s16(input_val_1));
acc_0 = vmlal_s16(acc_0, vget_high_s16(weights_val_0_1),
vget_high_s16(input_val_1));
acc_1 = vmlal_s16(acc_1, vget_high_s16(weights_val_1_1),
vget_high_s16(input_val_1));
acc_2 = vmlal_s16(acc_2, vget_high_s16(weights_val_2_1),
vget_high_s16(input_val_1));
acc_3 = vmlal_s16(acc_3, vget_high_s16(weights_val_3_1),
vget_high_s16(input_val_1));
}
// Handle 8 levels of depth at a time.
for (; in < input_size; in += 8) {
const uint8x8_t input_val_u8 = vld1_u8(input_data + in);
const uint8* weights_ptr = weights_data + in + out * input_size;
uint8x8_t weights_val_u8_0 = vld1_u8(weights_ptr + 0 * input_size);
uint8x8_t weights_val_u8_1 = vld1_u8(weights_ptr + 1 * input_size);
uint8x8_t weights_val_u8_2 = vld1_u8(weights_ptr + 2 * input_size);
uint8x8_t weights_val_u8_3 = vld1_u8(weights_ptr + 3 * input_size);
int16x8_t input_val;
input_val = vreinterpretq_s16_u16(vmovl_u8(input_val_u8));
input_val = vaddq_s16(input_val, input_offset_vec);
int16x8_t weights_val_0, weights_val_1, weights_val_2, weights_val_3;
weights_val_0 =
vaddq_s16(vreinterpretq_s16_u16(vmovl_u8(weights_val_u8_0)),
weights_offset_vec);
weights_val_1 =
vaddq_s16(vreinterpretq_s16_u16(vmovl_u8(weights_val_u8_1)),
weights_offset_vec);
weights_val_2 =
vaddq_s16(vreinterpretq_s16_u16(vmovl_u8(weights_val_u8_2)),
weights_offset_vec);
weights_val_3 =
vaddq_s16(vreinterpretq_s16_u16(vmovl_u8(weights_val_u8_3)),
weights_offset_vec);
acc_0 = vmlal_s16(acc_0, vget_low_s16(weights_val_0),
vget_low_s16(input_val));
acc_1 = vmlal_s16(acc_1, vget_low_s16(weights_val_1),
vget_low_s16(input_val));
acc_2 = vmlal_s16(acc_2, vget_low_s16(weights_val_2),
vget_low_s16(input_val));
acc_3 = vmlal_s16(acc_3, vget_low_s16(weights_val_3),
vget_low_s16(input_val));
acc_0 = vmlal_s16(acc_0, vget_high_s16(weights_val_0),
vget_high_s16(input_val));
acc_1 = vmlal_s16(acc_1, vget_high_s16(weights_val_1),
vget_high_s16(input_val));
acc_2 = vmlal_s16(acc_2, vget_high_s16(weights_val_2),
vget_high_s16(input_val));
acc_3 = vmlal_s16(acc_3, vget_high_s16(weights_val_3),
vget_high_s16(input_val));
}
// Horizontally reduce accumulators
int32x2_t pairwise_reduced_acc_0, pairwise_reduced_acc_1,
pairwise_reduced_acc_2, pairwise_reduced_acc_3;
pairwise_reduced_acc_0 =
vpadd_s32(vget_low_s32(acc_0), vget_high_s32(acc_0));
pairwise_reduced_acc_1 =
vpadd_s32(vget_low_s32(acc_1), vget_high_s32(acc_1));
pairwise_reduced_acc_2 =
vpadd_s32(vget_low_s32(acc_2), vget_high_s32(acc_2));
pairwise_reduced_acc_3 =
vpadd_s32(vget_low_s32(acc_3), vget_high_s32(acc_3));
const int32x2_t reduced_lo =
vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
const int32x2_t reduced_hi =
vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
// Add bias values.
int32x4_t bias_vec = vld1q_s32(bias_ptr);
bias_ptr += 4;
reduced = vaddq_s32(reduced, bias_vec);
int left_shift = accum_shift > 0 ? accum_shift : 0;
int right_shift = accum_shift > 0 ? 0 : -accum_shift;
reduced = vshlq_s32(reduced, vdupq_n_s32(left_shift));
// Multiply by the fixed-point multiplier.
reduced = vqrdmulhq_n_s32(reduced, accum_multiplier);
// Rounding-shift-right.
using gemmlowp::RoundingDivideByPOT;
reduced = RoundingDivideByPOT(reduced, right_shift);
// Narrow values down to 16 bit signed.
const int16x4_t res16 = vqmovn_s32(reduced);
vst1_s16(output_ptr, res16);
output_ptr += 4;
}
}
#endif
#ifdef GEMMLOWP_NEON
// Fast GEMV path for the fully-connected node inside an LSTM cell,
// specialized to the "symmetric range" quantization case: both the input
// activations and the weights have a zero_point of 128, so uint8 values can
// be converted to signed int8 by simply flipping the sign bit (XOR 0x80;
// see `signbit` below). Accumulation is in 32-bit; accumulators are rescaled
// by (accum_multiplier, accum_shift) and narrowed to int16 output.
// Size preconditions (DCHECKed below): single batch, output_size % 4 == 0,
// input_size % 64 == 0.
inline void GEMVForLstmCellWithSymmetricRange(
    const RuntimeShape& input_shape, const uint8* input_data,
    const RuntimeShape& weights_shape, const uint8* weights_data,
    const RuntimeShape& bias_shape, const int32* bias_data,
    int32 accum_multiplier, int accum_shift, const RuntimeShape& output_shape,
    int16* output_data) {
  ruy::profiler::ScopeLabel label("GEMVForLstmCellWithSymmetricRange");
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  const int output_dim_count = output_shape.DimensionsCount();
  const int weights_dim_count = weights_shape.DimensionsCount();
  // Batch size must be 1: this path is a matrix*vector product.
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(output_shape, output_dim_count - 1), 1);
  const int input_size = FlatSizeSkipDim(input_shape, 0);
  const int output_size = MatchingDim(weights_shape, weights_dim_count - 2,
                                      output_shape, output_dim_count - 1);
  // This special fast path for quantized LSTM cells does not try to support
  // odd sizes that we haven't encountered in any LSTM cell, that would
  // require special code (that would go untested until any LSTM cell
  // exercises it). We just guard our assumptions about size evenness with
  // the following assertions.
  TFLITE_DCHECK(!(output_size % 4));
  TFLITE_DCHECK(!(input_size % 64));
  const int32* bias_ptr = bias_data;
  int16* output_ptr = output_data;
  // XOR with 0x80 flips the sign bit, converting a uint8 value with
  // zero_point 128 into a plain signed int8 value.
  const uint8x16_t signbit = vdupq_n_u8(0x80);
  // Warm the L1 cache with the input activations; they are re-read by every
  // group of 4 output rows. NOTE(review): the 32-byte step assumes the
  // preload helper covers at least 32 bytes per call -- confirm.
  for (int in = 0; in < input_size; in += 32) {
    optimized_ops_preload_l1_keep(input_data + in);
  }
  // Split accum_shift into a left shift applied before the fixed-point
  // multiplication and a rounding right shift applied after it.
  const int left_shift = accum_shift > 0 ? accum_shift : 0;
  const int right_shift = accum_shift > 0 ? 0 : -accum_shift;
  for (int out = 0; out < output_size; out += 4) {
    // Load the bias values
    int32x4_t bias_vec = vld1q_s32(bias_ptr);
    bias_ptr += 4;
    // Clear accumulators. We use 2 accumulator registers per row,
    // for 4 rows. row_accumRN is the N-th accumulator for row R.
    int32x4_t row_accum00 = vdupq_n_s32(0);
    int32x4_t row_accum01 = vdupq_n_s32(0);
    int32x4_t row_accum10 = vdupq_n_s32(0);
    int32x4_t row_accum11 = vdupq_n_s32(0);
    int32x4_t row_accum20 = vdupq_n_s32(0);
    int32x4_t row_accum21 = vdupq_n_s32(0);
    int32x4_t row_accum30 = vdupq_n_s32(0);
    int32x4_t row_accum31 = vdupq_n_s32(0);
    // kReadAhead parametrizes how far ahead we prefetch weights into L1 cache.
    const int kReadAhead = 512;
    // Prefetch the first weights values.
    for (int k = 0; k < kReadAhead; k += 64) {
      optimized_ops_preload_l1_stream(weights_data + (out + 0) * input_size +
                                      k);
      optimized_ops_preload_l1_stream(weights_data + (out + 1) * input_size +
                                      k);
      optimized_ops_preload_l1_stream(weights_data + (out + 2) * input_size +
                                      k);
      optimized_ops_preload_l1_stream(weights_data + (out + 3) * input_size +
                                      k);
    }
    // Loop along the rows, handling 64 bytes per iteration because that's
    // cache line size on most current ARM-architecture CPUs.
    for (int in = 0; in < input_size; in += 64) {
      // Prefetch some future weights values.
      optimized_ops_preload_l1_stream(weights_data + (out + 0) * input_size +
                                      in + kReadAhead);
      optimized_ops_preload_l1_stream(weights_data + (out + 1) * input_size +
                                      in + kReadAhead);
      optimized_ops_preload_l1_stream(weights_data + (out + 2) * input_size +
                                      in + kReadAhead);
      optimized_ops_preload_l1_stream(weights_data + (out + 3) * input_size +
                                      in + kReadAhead);
      // We will use 2 local 16-bit accumulators per row, for 2 rows.
      // See below (*) for the rationale of processing only 2 rows at a time.
      // local_accumRN is the N-th local accumulator for row R.
      int16x8_t local_accum00;
      int16x8_t local_accum01;
      int16x8_t local_accum10;
      int16x8_t local_accum11;
      // Load 64 bytes of input activations values. Convert to signed int8
      // by flipping the sign bit (i.e. subtracting 128, the required
      // zero_point value).
      int8x16_t input0 = vreinterpretq_s8_u8(
          veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 0)));
      int8x16_t input1 = vreinterpretq_s8_u8(
          veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 1)));
      int8x16_t input2 = vreinterpretq_s8_u8(
          veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 2)));
      int8x16_t input3 = vreinterpretq_s8_u8(
          veorq_u8(signbit, vld1q_u8(input_data + in + 16 * 3)));
      // Beginning of the core accumulation. Notice how while we have 4
      // rows to process, this code is taking care of only 2 rows at a time,
      // thus being divided into two parts looking similar ("Rows 0 and 1" and
      // "Rows 2 and 3").
      //
      // (*) The rationale for handling only 2 rows at a time is to avoid
      // cache aliasing issues on 4-way set-associative L1-cache CPUs, such
      // as Cortex-A53. With sufficiently large, power-of-two matrix dimensions,
      // we may find ourselves in a situation where rows alias each other in
      // the L1 cache, and moreover may also mutually alias with the input
      // activations. If we try to load 4 rows at a time, together with the
      // input activations, that may be 5 mutually-aliasing vectors, resulting
      // in constant mutual eviction from L1 cache. Handling 2 rows at a time
      // here largely mitigates these issues, and seems at least to be very
      // effective on Cortex-A53:
      //                          Before       After
      // big (Cortex-A73)         2.85 ms      2.85 ms
      // little (Cortex-A53)      11.0 ms      5.16 ms
      // Rows 0 and 1:
      // Load 64 bytes of weights values from each row. Convert to signed int8
      // by flipping the sign bit (i.e. subtracting 128, the required
      // zero_point value).
      int8x16_t weights00 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 0) * input_size + in + 16 * 0)));
      int8x16_t weights01 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 0) * input_size + in + 16 * 1)));
      int8x16_t weights02 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 0) * input_size + in + 16 * 2)));
      int8x16_t weights03 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 0) * input_size + in + 16 * 3)));
      int8x16_t weights10 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 1) * input_size + in + 16 * 0)));
      int8x16_t weights11 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 1) * input_size + in + 16 * 1)));
      int8x16_t weights12 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 1) * input_size + in + 16 * 2)));
      int8x16_t weights13 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 1) * input_size + in + 16 * 3)));
      // Multiply-accumulate into local 16-bit accumulators.
      // We can accumulate two products without overflow because weights are
      // required to never be -128, so each product is at most 127^2 in absolute
      // value.
      local_accum00 = vmull_s8(vget_low_s8(weights00), vget_low_s8(input0));
      local_accum01 = vmull_s8(vget_low_s8(weights01), vget_low_s8(input1));
      local_accum10 = vmull_s8(vget_low_s8(weights10), vget_low_s8(input0));
      local_accum11 = vmull_s8(vget_low_s8(weights11), vget_low_s8(input1));
      local_accum00 = vmlal_s8(local_accum00, vget_high_s8(weights00),
                               vget_high_s8(input0));
      local_accum01 = vmlal_s8(local_accum01, vget_high_s8(weights01),
                               vget_high_s8(input1));
      local_accum10 = vmlal_s8(local_accum10, vget_high_s8(weights10),
                               vget_high_s8(input0));
      local_accum11 = vmlal_s8(local_accum11, vget_high_s8(weights11),
                               vget_high_s8(input1));
      // Pairwise add and accumulate into 32-bit accumulators
      row_accum00 = vpadalq_s16(row_accum00, local_accum00);
      row_accum01 = vpadalq_s16(row_accum01, local_accum01);
      row_accum10 = vpadalq_s16(row_accum10, local_accum10);
      row_accum11 = vpadalq_s16(row_accum11, local_accum11);
      // Multiply-accumulate into local 16-bit accumulators.
      // We can accumulate two products without overflow because weights are
      // required to never be -128, so each product is at most 127^2 in absolute
      // value.
      local_accum00 = vmull_s8(vget_low_s8(weights02), vget_low_s8(input2));
      local_accum01 = vmull_s8(vget_low_s8(weights03), vget_low_s8(input3));
      local_accum10 = vmull_s8(vget_low_s8(weights12), vget_low_s8(input2));
      local_accum11 = vmull_s8(vget_low_s8(weights13), vget_low_s8(input3));
      local_accum00 = vmlal_s8(local_accum00, vget_high_s8(weights02),
                               vget_high_s8(input2));
      local_accum01 = vmlal_s8(local_accum01, vget_high_s8(weights03),
                               vget_high_s8(input3));
      local_accum10 = vmlal_s8(local_accum10, vget_high_s8(weights12),
                               vget_high_s8(input2));
      local_accum11 = vmlal_s8(local_accum11, vget_high_s8(weights13),
                               vget_high_s8(input3));
      // Pairwise add and accumulate into 32-bit accumulators
      row_accum00 = vpadalq_s16(row_accum00, local_accum00);
      row_accum01 = vpadalq_s16(row_accum01, local_accum01);
      row_accum10 = vpadalq_s16(row_accum10, local_accum10);
      row_accum11 = vpadalq_s16(row_accum11, local_accum11);
      // Rows 2 and 3:
      // Load 64 bytes of weights values from each row. Convert to signed int8
      // by flipping the sign bit (i.e. subtracting 128, the required
      // zero_point value).
      weights00 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 2) * input_size + in + 16 * 0)));
      weights01 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 2) * input_size + in + 16 * 1)));
      weights02 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 2) * input_size + in + 16 * 2)));
      weights03 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 2) * input_size + in + 16 * 3)));
      weights10 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 3) * input_size + in + 16 * 0)));
      weights11 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 3) * input_size + in + 16 * 1)));
      weights12 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 3) * input_size + in + 16 * 2)));
      weights13 = vreinterpretq_s8_u8(veorq_u8(
          signbit,
          vld1q_u8(weights_data + (out + 3) * input_size + in + 16 * 3)));
      // Multiply-accumulate into local 16-bit accumulators.
      // We can accumulate two products without overflow because weights are
      // required to never be -128, so each product is at most 127^2 in absolute
      // value.
      local_accum00 = vmull_s8(vget_low_s8(weights00), vget_low_s8(input0));
      local_accum01 = vmull_s8(vget_low_s8(weights01), vget_low_s8(input1));
      local_accum10 = vmull_s8(vget_low_s8(weights10), vget_low_s8(input0));
      local_accum11 = vmull_s8(vget_low_s8(weights11), vget_low_s8(input1));
      local_accum00 = vmlal_s8(local_accum00, vget_high_s8(weights00),
                               vget_high_s8(input0));
      local_accum01 = vmlal_s8(local_accum01, vget_high_s8(weights01),
                               vget_high_s8(input1));
      local_accum10 = vmlal_s8(local_accum10, vget_high_s8(weights10),
                               vget_high_s8(input0));
      local_accum11 = vmlal_s8(local_accum11, vget_high_s8(weights11),
                               vget_high_s8(input1));
      // Pairwise add and accumulate into 32-bit accumulators
      row_accum20 = vpadalq_s16(row_accum20, local_accum00);
      row_accum21 = vpadalq_s16(row_accum21, local_accum01);
      row_accum30 = vpadalq_s16(row_accum30, local_accum10);
      row_accum31 = vpadalq_s16(row_accum31, local_accum11);
      // Multiply-accumulate into local 16-bit accumulators.
      // We can accumulate two products without overflow because weights are
      // required to never be -128, so each product is at most 127^2 in absolute
      // value.
      local_accum00 = vmull_s8(vget_low_s8(weights02), vget_low_s8(input2));
      local_accum01 = vmull_s8(vget_low_s8(weights03), vget_low_s8(input3));
      local_accum10 = vmull_s8(vget_low_s8(weights12), vget_low_s8(input2));
      local_accum11 = vmull_s8(vget_low_s8(weights13), vget_low_s8(input3));
      local_accum00 = vmlal_s8(local_accum00, vget_high_s8(weights02),
                               vget_high_s8(input2));
      local_accum01 = vmlal_s8(local_accum01, vget_high_s8(weights03),
                               vget_high_s8(input3));
      local_accum10 = vmlal_s8(local_accum10, vget_high_s8(weights12),
                               vget_high_s8(input2));
      local_accum11 = vmlal_s8(local_accum11, vget_high_s8(weights13),
                               vget_high_s8(input3));
      // Pairwise add and accumulate into 32-bit accumulators
      row_accum20 = vpadalq_s16(row_accum20, local_accum00);
      row_accum21 = vpadalq_s16(row_accum21, local_accum01);
      row_accum30 = vpadalq_s16(row_accum30, local_accum10);
      row_accum31 = vpadalq_s16(row_accum31, local_accum11);
    }
    // Combine each row's two partial 32-bit accumulators.
    row_accum00 = vaddq_s32(row_accum00, row_accum01);
    row_accum10 = vaddq_s32(row_accum10, row_accum11);
    row_accum20 = vaddq_s32(row_accum20, row_accum21);
    row_accum30 = vaddq_s32(row_accum30, row_accum31);
    // Horizontally reduce accumulators
    int32x2_t pairwise_reduced_acc_0, pairwise_reduced_acc_1,
        pairwise_reduced_acc_2, pairwise_reduced_acc_3;
    pairwise_reduced_acc_0 =
        vpadd_s32(vget_low_s32(row_accum00), vget_high_s32(row_accum00));
    pairwise_reduced_acc_1 =
        vpadd_s32(vget_low_s32(row_accum10), vget_high_s32(row_accum10));
    pairwise_reduced_acc_2 =
        vpadd_s32(vget_low_s32(row_accum20), vget_high_s32(row_accum20));
    pairwise_reduced_acc_3 =
        vpadd_s32(vget_low_s32(row_accum30), vget_high_s32(row_accum30));
    const int32x2_t reduced_lo =
        vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
    const int32x2_t reduced_hi =
        vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
    int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
    // Add bias values.
    reduced = vaddq_s32(reduced, bias_vec);
    // Requantize: optional left shift, then saturating-doubling fixed-point
    // multiply, then rounding right shift.
    reduced = vshlq_s32(reduced, vdupq_n_s32(left_shift));
    // Multiply by the fixed-point multiplier.
    reduced = vqrdmulhq_n_s32(reduced, accum_multiplier);
    // Rounding-shift-right.
    using gemmlowp::RoundingDivideByPOT;
    reduced = RoundingDivideByPOT(reduced, right_shift);
    // Narrow values down to 16 bit signed.
    const int16x4_t res16 = vqmovn_s32(reduced);
    vst1_s16(output_ptr, res16);
    output_ptr += 4;
  }
}
#endif
inline void FullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const uint8* input_data, const RuntimeShape& filter_shape,
const uint8* filter_data, const RuntimeShape& bias_shape,
const int32* bias_data_int32, const RuntimeShape& output_shape,
int16* output_data, gemmlowp::GemmContext* gemmlowp_context) {
ruy::profiler::ScopeLabel label("FullyConnected/Uint8Int16");
const int32 input_offset = params.input_offset;
const int32 filter_offset = params.weights_offset;
const int32 output_offset = params.output_offset;
const int32 output_multiplier = params.output_multiplier;
const int output_shift = params.output_shift;
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
// This is a copy of the reference implementation. We do not currently have a
// properly optimized version.
(void)gemmlowp_context; // only used in properly optimized code.
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
TFLITE_DCHECK_EQ(output_offset, 0);
TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
// TODO(b/62193649): This really should be:
// const int batches = ArraySize(output_dims, 1);
// but the current --variable_batch hack consists in overwriting the 3rd
// dimension with the runtime batch size, as we don't keep track for each
// array of which dimension is the batch dimension in it.
const int output_dim_count = output_shape.DimensionsCount();
const int filter_dim_count = filter_shape.DimensionsCount();
const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
output_shape, output_dim_count - 1);
const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
// Implementation of the fully connected node suited to the inside of an LSTM
// cell. The operands are 8-bit integers, the accumulators are internally
// 32bit integers, and the output is 16-bit fixed-point with 3 integer bits so
// the output range is [-2^3, 2^3] == [-8, 8]. The rationale for that
// is explained in the function comment above.
#ifdef GEMMLOWP_NEON
if (batches == 1 && input_offset == -128 && output_activation_min == -32768 &&
output_activation_max == 32767) {
if (filter_offset == -128 && !(output_depth % 4) && !(accum_depth % 64)) {
GEMVForLstmCellWithSymmetricRange(
input_shape, input_data, filter_shape, filter_data, bias_shape,
bias_data_int32, output_multiplier, output_shift, output_shape,
output_data);
return;
}
if (!(output_depth % 4) && !(accum_depth % 8)) {
GEMVForLstmCell(input_shape, input_data, filter_shape, filter_data,
filter_offset, bias_shape, bias_data_int32,
output_multiplier, output_shift, output_shape,
output_data);
return;
}
}
#endif
gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::RowMajor> weights_matrix(
filter_data, output_depth, accum_depth);
gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::ColMajor> input_matrix(
input_data, accum_depth, batches);
gemmlowp::MatrixMap<int16, gemmlowp::MapOrder::ColMajor> output_matrix(
output_data, output_depth, batches);
typedef gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>
ColVectorMap;
ColVectorMap bias_vector(bias_data_int32, output_depth);
gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
bias_addition_stage.bias_vector = bias_vector;
gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent scale_stage;
scale_stage.result_offset_after_shift = 0;
scale_stage.result_fixedpoint_multiplier = output_multiplier;
// Note that this shift is negated wrt ordinary FC.
scale_stage.result_exponent = output_shift;
gemmlowp::OutputStageClamp clamp_stage;
clamp_stage.min = output_activation_min;
clamp_stage.max = output_activation_max;
gemmlowp::OutputStageSaturatingCastToInt16 saturating_cast_int16_stage;
auto output_pipeline =
std::make_tuple(bias_addition_stage, scale_stage, clamp_stage,
saturating_cast_int16_stage);
gemmlowp::GemmWithOutputPipeline<uint8, int16,
gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
gemmlowp_context, weights_matrix, input_matrix, &output_matrix,
filter_offset, input_offset, output_pipeline);
}
inline void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,
int32 input_offset, const uint8* filter_data,
const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data, const Dims<4>& bias_dims,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims,
gemmlowp::GemmContext* gemmlowp_context) {
tflite::FullyConnectedParams op_params;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
FullyConnected(op_params, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
bias_data, DimsToShape(output_dims), output_data,
gemmlowp_context);
}
inline void FullyConnected(
const uint8* input_data, const Dims<4>& input_dims, int32 input_offset,
const uint8* filter_data, const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data_int32, const Dims<4>& bias_dims, int32 output_offset,
int32 output_multiplier, int output_shift, int32 output_activation_min,
int32 output_activation_max, int16* output_data, const Dims<4>& output_dims,
gemmlowp::GemmContext* gemmlowp_context) {
tflite::FullyConnectedParams op_params;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
FullyConnected(op_params, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
bias_data_int32, DimsToShape(output_dims), output_data,
gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,
int32 input_offset, const uint8* filter_data,
const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data, const Dims<4>& bias_dims,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims,
gemmlowp::GemmContext* gemmlowp_context) {
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
FullyConnected(input_data, input_dims, input_offset, filter_data, filter_dims,
filter_offset, bias_data, bias_dims, output_offset,
output_multiplier, output_shift, output_activation_min,
output_activation_max, output_data, output_dims,
gemmlowp_context);
}
#ifdef USE_NEON
// Per-worker GEMV kernel for the legacy int8 fully-connected path: computes
// output rows [row_start, row_end) of filter * input + bias, requantizes each
// int32 accumulator with (output_multiplier, output_shift), adds
// output_offset, clamps to [output_activation_min, output_activation_max]
// and stores int8 results. Processes kPeel (= 4) output rows per iteration
// using NEON.
inline void LegacyInt8FullyConnectedAsGEMVWorkerImpl(
    const RuntimeShape& input_shape, const int8_t* input_data,
    int32 input_offset, const RuntimeShape& filter_shape,
    const int8_t* filter_data, int32 filter_offset,
    const RuntimeShape& bias_shape, const int32* bias_data, int32 output_offset,
    int32 output_multiplier, int output_shift, int32 output_activation_min,
    int32 output_activation_max, const RuntimeShape& output_shape,
    int8_t* output_data, int row_start, int row_end) {
  ruy::profiler::ScopeLabel label("FullyConnectedAsGEMVInt8/8bit");
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  const int output_dim_count = output_shape.DimensionsCount();
  // Batch size must be 1: this is a matrix*vector product.
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(output_shape, output_dim_count - 1), 1);
  const int input_size = FlatSizeSkipDim(input_shape, 0);
  static constexpr int kPeel = 4;
  // Positive output_shift means a left shift (power-of-two multiply) before
  // the fixed-point multiplier; negative means a rounding right shift after.
  const bool shift_left = (output_shift > 0);
  TFLITE_DCHECK_GE(row_end - row_start, kPeel);
  for (int out = row_start; out < row_end; out += kPeel) {
    // When the row count isn't a multiple of kPeel, the last iteration is
    // clamped back so it re-computes some already-written rows; the results
    // are identical, so the overlapping writes are harmless.
    out = std::min(out, row_end - kPeel);
    // One int32x4 accumulator per output row.
    int32x4_t acc0 = vdupq_n_s32(0);
    int32x4_t acc1 = acc0;
    int32x4_t acc2 = acc0;
    int32x4_t acc3 = acc0;
    const int16x8_t input_offset_vec = vdupq_n_s16(input_offset);
    const int16x8_t filter_offset_vec = vdupq_n_s16(filter_offset);
    int in = 0;
    // Main loop: 16 input channels at a time.
    for (; in <= input_size - 16; in += 16) {
      const int8x16_t input_val_s8 = vld1q_s8(input_data + in);
      const int8_t* filter_ptr = filter_data + in + out * input_size;
      int8x16_t filter_val_s8_0 = vld1q_s8(filter_ptr);
      filter_ptr += input_size;
      int8x16_t filter_val_s8_1 = vld1q_s8(filter_ptr);
      filter_ptr += input_size;
      int8x16_t filter_val_s8_2 = vld1q_s8(filter_ptr);
      filter_ptr += input_size;
      int8x16_t filter_val_s8_3 = vld1q_s8(filter_ptr);
      // Widen to 16-bit and apply the zero-point offsets.
      int16x8_t input_val_0, input_val_1;
      int8x8_t low = vget_low_s8(input_val_s8);
      int8x8_t high = vget_high_s8(input_val_s8);
      input_val_0 = vmovl_s8(low);
      input_val_1 = vmovl_s8(high);
      input_val_0 = vaddq_s16(input_val_0, input_offset_vec);
      input_val_1 = vaddq_s16(input_val_1, input_offset_vec);
      low = vget_low_s8(filter_val_s8_0);
      high = vget_high_s8(filter_val_s8_0);
      int16x8_t filter_val_0_0 = vmovl_s8(low);
      int16x8_t filter_val_0_1 = vmovl_s8(high);
      filter_val_0_0 = vaddq_s16(filter_val_0_0, filter_offset_vec);
      filter_val_0_1 = vaddq_s16(filter_val_0_1, filter_offset_vec);
      low = vget_low_s8(filter_val_s8_1);
      high = vget_high_s8(filter_val_s8_1);
      int16x8_t filter_val_1_0 = vmovl_s8(low);
      int16x8_t filter_val_1_1 = vmovl_s8(high);
      filter_val_1_0 = vaddq_s16(filter_val_1_0, filter_offset_vec);
      filter_val_1_1 = vaddq_s16(filter_val_1_1, filter_offset_vec);
      low = vget_low_s8(filter_val_s8_2);
      high = vget_high_s8(filter_val_s8_2);
      int16x8_t filter_val_2_0 = vmovl_s8(low);
      int16x8_t filter_val_2_1 = vmovl_s8(high);
      filter_val_2_0 = vaddq_s16(filter_val_2_0, filter_offset_vec);
      filter_val_2_1 = vaddq_s16(filter_val_2_1, filter_offset_vec);
      low = vget_low_s8(filter_val_s8_3);
      high = vget_high_s8(filter_val_s8_3);
      int16x8_t filter_val_3_0 = vmovl_s8(low);
      int16x8_t filter_val_3_1 = vmovl_s8(high);
      filter_val_3_0 = vaddq_s16(filter_val_3_0, filter_offset_vec);
      filter_val_3_1 = vaddq_s16(filter_val_3_1, filter_offset_vec);
      // Multiply-accumulate 16-bit values into the 32-bit accumulators.
      acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0_0),
                       vget_low_s16(input_val_0));
      acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1_0),
                       vget_low_s16(input_val_0));
      acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2_0),
                       vget_low_s16(input_val_0));
      acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3_0),
                       vget_low_s16(input_val_0));
      acc0 = vmlal_s16(acc0, vget_low_s16(filter_val_0_1),
                       vget_low_s16(input_val_1));
      acc1 = vmlal_s16(acc1, vget_low_s16(filter_val_1_1),
                       vget_low_s16(input_val_1));
      acc2 = vmlal_s16(acc2, vget_low_s16(filter_val_2_1),
                       vget_low_s16(input_val_1));
      acc3 = vmlal_s16(acc3, vget_low_s16(filter_val_3_1),
                       vget_low_s16(input_val_1));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0_0),
                       vget_high_s16(input_val_0));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1_0),
                       vget_high_s16(input_val_0));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2_0),
                       vget_high_s16(input_val_0));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3_0),
                       vget_high_s16(input_val_0));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0_1),
                       vget_high_s16(input_val_1));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1_1),
                       vget_high_s16(input_val_1));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2_1),
                       vget_high_s16(input_val_1));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3_1),
                       vget_high_s16(input_val_1));
    }
    // Tail loop: 8 input channels at a time.
    for (; in <= input_size - 8; in += 8) {
      const int8x8_t input_val_s8 = vld1_s8(input_data + in);
      const int8_t* filter_ptr = filter_data + in + out * input_size;
      int8x8_t filter_val_s8_0 = vld1_s8(filter_ptr);
      filter_ptr += input_size;
      int8x8_t filter_val_s8_1 = vld1_s8(filter_ptr);
      filter_ptr += input_size;
      int8x8_t filter_val_s8_2 = vld1_s8(filter_ptr);
      filter_ptr += input_size;
      int8x8_t filter_val_s8_3 = vld1_s8(filter_ptr);
      int16x8_t input_val = vmovl_s8(input_val_s8);
      input_val = vaddq_s16(input_val, input_offset_vec);
      int16x8_t filter_val_0 = vmovl_s8(filter_val_s8_0);
      filter_val_0 = vaddq_s16(filter_val_0, filter_offset_vec);
      int16x8_t filter_val_1 = vmovl_s8(filter_val_s8_1);
      filter_val_1 = vaddq_s16(filter_val_1, filter_offset_vec);
      int16x8_t filter_val_2 = vmovl_s8(filter_val_s8_2);
      filter_val_2 = vaddq_s16(filter_val_2, filter_offset_vec);
      int16x8_t filter_val_3 = vmovl_s8(filter_val_s8_3);
      filter_val_3 = vaddq_s16(filter_val_3, filter_offset_vec);
      acc0 =
          vmlal_s16(acc0, vget_low_s16(filter_val_0), vget_low_s16(input_val));
      acc1 =
          vmlal_s16(acc1, vget_low_s16(filter_val_1), vget_low_s16(input_val));
      acc2 =
          vmlal_s16(acc2, vget_low_s16(filter_val_2), vget_low_s16(input_val));
      acc3 =
          vmlal_s16(acc3, vget_low_s16(filter_val_3), vget_low_s16(input_val));
      acc0 = vmlal_s16(acc0, vget_high_s16(filter_val_0),
                       vget_high_s16(input_val));
      acc1 = vmlal_s16(acc1, vget_high_s16(filter_val_1),
                       vget_high_s16(input_val));
      acc2 = vmlal_s16(acc2, vget_high_s16(filter_val_2),
                       vget_high_s16(input_val));
      acc3 = vmlal_s16(acc3, vget_high_s16(filter_val_3),
                       vget_high_s16(input_val));
    }
    // Scalar leftovers (input_size % 8 != 0): spill the accumulators to
    // memory, accumulate the remaining products in scalar code, then reload.
    if (in < input_size) {
      int32 buf[16];
      vst1q_s32(buf + 0, acc0);
      vst1q_s32(buf + 4, acc1);
      vst1q_s32(buf + 8, acc2);
      vst1q_s32(buf + 12, acc3);
      for (; in < input_size; in++) {
        // Any lane works here: all four lanes of each accumulator are summed
        // in the horizontal reduction below.
        int lane = (in + 8 - input_size) % 4;
        const int32 input_val = input_data[in] + input_offset;
        for (int k = 0; k < kPeel; k++) {
          int32 filter_val =
              filter_data[in + (out + k) * input_size] + filter_offset;
          buf[lane + 4 * k] += filter_val * input_val;
        }
      }
      acc0 = vld1q_s32(buf + 0);
      acc1 = vld1q_s32(buf + 4);
      acc2 = vld1q_s32(buf + 8);
      acc3 = vld1q_s32(buf + 12);
    }
    // Horizontally reduce accumulators
    int32x2_t pairwise_reduced_acc_0 =
        vpadd_s32(vget_low_s32(acc0), vget_high_s32(acc0));
    int32x2_t pairwise_reduced_acc_1 =
        vpadd_s32(vget_low_s32(acc1), vget_high_s32(acc1));
    int32x2_t pairwise_reduced_acc_2 =
        vpadd_s32(vget_low_s32(acc2), vget_high_s32(acc2));
    int32x2_t pairwise_reduced_acc_3 =
        vpadd_s32(vget_low_s32(acc3), vget_high_s32(acc3));
    const int32x2_t reduced_lo =
        vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
    const int32x2_t reduced_hi =
        vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
    int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
    // Add bias values.
    int32x4_t bias_vec = vld1q_s32(bias_data + out);
    reduced = vaddq_s32(reduced, bias_vec);
    if (shift_left) {
      const int32 multiplier_power_of_two = 1 << output_shift;
      reduced = vmulq_n_s32(reduced, multiplier_power_of_two);
      reduced = vqrdmulhq_n_s32(reduced, output_multiplier);
    } else {
      // Multiply by the fixed-point multiplier.
      reduced = vqrdmulhq_n_s32(reduced, output_multiplier);
      // Rounding-shift-right.
      using gemmlowp::RoundingDivideByPOT;
      reduced = RoundingDivideByPOT(reduced, -output_shift);
    }
    // Add the output offset.
    const int32x4_t output_offset_vec = vdupq_n_s32(output_offset);
    reduced = vaddq_s32(reduced, output_offset_vec);
    // Narrow values down to 16 bit signed.
    const int16x4_t res16 = vqmovn_s32(reduced);
    // Narrow values down to 8 bit signed, saturating.
    int8x8_t res8 = vqmovn_s16(vcombine_s16(res16, res16));
    // Apply the clamping from the activation function
    res8 = vmax_s8(res8, vdup_n_s8(output_activation_min));
    res8 = vmin_s8(res8, vdup_n_s8(output_activation_max));
    // Store results to destination.
    vst1_lane_s8(output_data + out + 0, res8, 0);
    vst1_lane_s8(output_data + out + 1, res8, 1);
    vst1_lane_s8(output_data + out + 2, res8, 2);
    vst1_lane_s8(output_data + out + 3, res8, 3);
  }
}
// gemmlowp::Task computing one contiguous slice [row_start_, row_end_) of the
// output rows of the legacy int8 fully-connected-as-GEMV kernel.  Run()
// simply delegates to LegacyInt8FullyConnectedAsGEMVWorkerImpl.  All
// reference and pointer members are borrowed from the caller and must
// outlive the task's execution on the worker pool.
struct LegacyInt8FullyConnectedAsGEMVWorkerTask : public gemmlowp::Task {
  LegacyInt8FullyConnectedAsGEMVWorkerTask(
      const RuntimeShape& input_shape, const int8_t* input_data,
      int32 input_offset, const RuntimeShape& filter_shape,
      const int8_t* filter_data, int32 filter_offset,
      const RuntimeShape& bias_shape, const int32* bias_data,
      int32 output_offset, int32 output_multiplier, int output_shift,
      int32 output_activation_min, int32 output_activation_max,
      const RuntimeShape& output_shape, int8_t* output_data, int row_start,
      int row_end)
      : input_shape_(input_shape),
        input_data_(input_data),
        input_offset_(input_offset),
        filter_shape_(filter_shape),
        filter_data_(filter_data),
        filter_offset_(filter_offset),
        bias_shape_(bias_shape),
        bias_data_(bias_data),
        output_offset_(output_offset),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        output_activation_min_(output_activation_min),
        output_activation_max_(output_activation_max),
        output_shape_(output_shape),
        output_data_(output_data),
        row_start_(row_start),
        row_end_(row_end) {}
  // Executes this task's row slice on the worker thread.
  void Run() override {
    LegacyInt8FullyConnectedAsGEMVWorkerImpl(
        input_shape_, input_data_, input_offset_, filter_shape_, filter_data_,
        filter_offset_, bias_shape_, bias_data_, output_offset_,
        output_multiplier_, output_shift_, output_activation_min_,
        output_activation_max_, output_shape_, output_data_, row_start_,
        row_end_);
  }
  const RuntimeShape& input_shape_;
  const int8_t* input_data_;
  int32 input_offset_;
  const RuntimeShape& filter_shape_;
  const int8_t* filter_data_;
  int32 filter_offset_;
  const RuntimeShape& bias_shape_;
  const int32* bias_data_;
  int32 output_offset_;
  int32 output_multiplier_;
  int output_shift_;
  int32 output_activation_min_;
  int32 output_activation_max_;
  const RuntimeShape& output_shape_;
  int8_t* output_data_;
  int row_start_;   // first output row (inclusive) handled by this task
  int row_end_;     // last output row (exclusive) handled by this task
};
inline void LegacyInt8FullyConnectedAsGEMV(
const RuntimeShape& input_shape, const int8_t* input_data,
int32 input_offset, const RuntimeShape& filter_shape,
const int8_t* filter_data, int32 filter_offset,
const RuntimeShape& bias_shape, const int32* bias_data, int32 output_offset,
int32 output_multiplier, int output_shift, int32 output_activation_min,
int32 output_activation_max, const RuntimeShape& output_shape,
int8_t* output_data, gemmlowp::GemmContext* gemmlowp_context) {
const int output_dim_count = output_shape.DimensionsCount();
const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
const int output_rows = output_shape.Dims(output_dim_count - 1);
const int input_size = FlatSizeSkipDim(input_shape, 0);
static constexpr int kKernelRows = 4;
const int thread_count = gemmlowp::HowManyThreads<kKernelRows>(
gemmlowp_context->max_num_threads(), output_rows, batches, input_size);
if (thread_count == 1) {
// Single-thread case: do the computation on the current thread, don't
// use a threadpool
LegacyInt8FullyConnectedAsGEMVWorkerImpl(
input_shape, input_data, input_offset, filter_shape, filter_data,
filter_offset, bias_shape, bias_data, output_offset, output_multiplier,
output_shift, output_activation_min, output_activation_max,
output_shape, output_data, 0, output_rows);
return;
}
// Multi-threaded case: use the gemmlowp context's threadpool.
TFLITE_DCHECK_GT(thread_count, 1);
std::vector<LegacyInt8FullyConnectedAsGEMVWorkerTask> tasks;
// TODO(b/131746020) don't create new heap allocations every time.
// At least we make it a single heap allocation by using reserve().
tasks.reserve(thread_count);
const int kRowsPerWorker = gemmlowp::RoundUp<kKernelRows>(
gemmlowp::CeilQuotient(output_rows, thread_count));
int row_start = 0;
for (int i = 0; i < thread_count; ++i) {
int row_end = std::min(output_rows, row_start + kRowsPerWorker);
tasks.emplace_back(input_shape, input_data, input_offset, filter_shape,
filter_data, filter_offset, bias_shape, bias_data,
output_offset, output_multiplier, output_shift,
output_activation_min, output_activation_max,
output_shape, output_data, row_start, row_end);
row_start = row_end;
}
TFLITE_DCHECK_EQ(row_start, output_rows);
gemmlowp_context->workers_pool()->Execute(tasks.size(), tasks.data());
}
#endif // USE_NEON
// Quantized int8 fully-connected layer.  Dispatch order:
//   1. NEON GEMV fast path (USE_NEON, batches == 1, output_size >= 4),
//   2. gemmlowp GEMM (GEMMLOWP_NEON),
//   3. portable reference implementation otherwise.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int8* input_data, const RuntimeShape& filter_shape,
    const int8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape, int8* output_data,
    gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label("FullyConnectedInt8/8bit");
#ifdef USE_NEON
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  if (batches == 1) {
    const int output_size = MatchingDim(filter_shape, filter_dim_count - 2,
                                        output_shape, output_dim_count - 1);
    // The GEMV kernel processes output rows four at a time, so it needs at
    // least 4 output channels to be worthwhile.
    if (output_size >= 4) {
      return LegacyInt8FullyConnectedAsGEMV(
          input_shape, input_data, input_offset, filter_shape, filter_data,
          filter_offset, bias_shape, bias_data, output_offset,
          output_multiplier, output_shift, output_activation_min,
          output_activation_max, output_shape, output_data, gemmlowp_context);
    }
  }
#endif  // USE_NEON
#ifdef GEMMLOWP_NEON
  // General GEMM path: map the raw buffers as gemmlowp matrices and run the
  // quantized matmul with the requantization output pipeline.
  const int filter_rows = filter_shape.Dims(filter_dim_count - 2);
  const int filter_cols = filter_shape.Dims(filter_dim_count - 1);
  TFLITE_DCHECK_EQ(filter_shape.FlatSize(), filter_rows * filter_cols);
  const int output_rows = output_shape.Dims(output_dim_count - 1);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  gemmlowp::MatrixMap<const int8, gemmlowp::MapOrder::RowMajor> filter_matrix(
      filter_data, output_rows, filter_cols, filter_cols);
  gemmlowp::MatrixMap<const int8, gemmlowp::MapOrder::ColMajor> input_matrix(
      input_data, filter_cols, batches, filter_cols);
  gemmlowp::MatrixMap<int8, gemmlowp::MapOrder::ColMajor> output_matrix(
      output_data, output_rows, batches, output_rows);
  const auto& output_pipeline = GemmlowpOutputPipelineInt8::MakeExp(
      bias_data, output_rows, output_offset, output_multiplier, output_shift,
      output_activation_min, output_activation_max);
  gemmlowp::GemmWithOutputPipeline<
      int8, int8, gemmlowp::SignedL8R8WithLhsNonzeroBitDepthParams>(
      gemmlowp_context, filter_matrix, input_matrix, &output_matrix,
      filter_offset, input_offset, output_pipeline);
  return;
#endif  // GEMMLOWP_NEON
  // If both GEMMLOWP_NEON && NEON paths are skipped, fallback to reference
  // implementation.
  reference_integer_ops::FullyConnected(params, input_shape, input_data,
                                        filter_shape, filter_data, bias_shape,
                                        bias_data, output_shape, output_data);
}
// gemmlowp::Task computing one contiguous slice of output channels of the
// shuffled fully-connected kernel; Run() delegates to
// ShuffledFullyConnectedWorkerImpl.  Pointer members are borrowed and must
// outlive the task.  output_stride_ lets a task write into a sub-range of
// a larger output buffer (the full output depth).
struct LegacyShuffledFullyConnectedWorkerTask : gemmlowp::Task {
  LegacyShuffledFullyConnectedWorkerTask(const uint8* input_data,
                                         const int8* shuffled_weights_data,
                                         int batches, int output_depth,
                                         int output_stride, int accum_depth,
                                         const int32* bias_data,
                                         int32 output_multiplier,
                                         int output_shift, int16* output_data)
      : input_data_(input_data),
        shuffled_weights_data_(shuffled_weights_data),
        batches_(batches),
        output_depth_(output_depth),
        output_stride_(output_stride),
        accum_depth_(accum_depth),
        bias_data_(bias_data),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        output_data_(output_data) {}
  // Executes this task's output-channel slice on the worker thread.
  void Run() override {
    ShuffledFullyConnectedWorkerImpl(
        input_data_, shuffled_weights_data_, batches_, output_depth_,
        output_stride_, accum_depth_, bias_data_, output_multiplier_,
        output_shift_, output_data_);
  }
  const uint8* input_data_;
  const int8* shuffled_weights_data_;
  int batches_;
  int output_depth_;
  int output_stride_;
  int accum_depth_;
  const int32* bias_data_;
  int32 output_multiplier_;
  int output_shift_;
  int16* output_data_;
};
// Shuffled-weights fully-connected kernel with int16 output.  The input
// activations are first copied into the workspace buffer with their sign
// bit flipped (XOR 0x80) — and, for batches == 4, interleaved in groups of
// 16 per batch — then the accumulation is run single- or multi-threaded.
// Only batches == 1 and batches == 4 are supported (DCHECK otherwise).
inline void ShuffledFullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& weights_shape,
    const uint8* shuffled_weights_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int16* output_data, uint8* shuffled_input_workspace_data,
    gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label("ShuffledFullyConnected/8bit");
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  (void)gemmlowp_context;  // only used in optimized code.
  // This kernel only supports the full int16 range, no narrower clamping.
  TFLITE_DCHECK_EQ(output_activation_min, -32768);
  TFLITE_DCHECK_EQ(output_activation_max, 32767);
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dim_count - 1);
  TFLITE_DCHECK((accum_depth % 16) == 0);
  TFLITE_DCHECK((output_depth % 4) == 0);
  // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
  // so that just reinterpreting them as int8 values is equivalent to
  // subtracting 128 from them, thus implementing for free the subtraction of
  // the zero_point value 128.
  const int8* int8_shuffled_weights_data =
      reinterpret_cast<const int8*>(shuffled_weights_data);
  // Shuffling and xoring of input activations into the workspace buffer
  if (batches == 1) {
    // Single batch: straight copy with sign-bit flip, no interleaving.
#ifdef USE_NEON
    const uint8x16_t signbit = vdupq_n_u8(0x80);
    for (int i = 0; i < accum_depth; i += 16) {
      uint8x16_t val = vld1q_u8(input_data + i);
      val = veorq_u8(val, signbit);
      vst1q_u8(shuffled_input_workspace_data + i, val);
    }
#else
    for (int i = 0; i < accum_depth; i++) {
      shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
    }
#endif
  } else if (batches == 4) {
    // Four batches: interleave 16 depth values per batch so the kernel can
    // consume all four batches with sequential loads.
    uint8* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
    int c = 0;
#ifdef USE_NEON
    const uint8x16_t signbit = vdupq_n_u8(0x80);
    for (c = 0; c < accum_depth; c += 16) {
      const uint8* src_data_ptr = input_data + c;
      uint8x16_t val0 = vld1q_u8(src_data_ptr + 0 * accum_depth);
      uint8x16_t val1 = vld1q_u8(src_data_ptr + 1 * accum_depth);
      uint8x16_t val2 = vld1q_u8(src_data_ptr + 2 * accum_depth);
      uint8x16_t val3 = vld1q_u8(src_data_ptr + 3 * accum_depth);
      val0 = veorq_u8(val0, signbit);
      val1 = veorq_u8(val1, signbit);
      val2 = veorq_u8(val2, signbit);
      val3 = veorq_u8(val3, signbit);
      vst1q_u8(shuffled_input_workspace_ptr + 0, val0);
      vst1q_u8(shuffled_input_workspace_ptr + 16, val1);
      vst1q_u8(shuffled_input_workspace_ptr + 32, val2);
      vst1q_u8(shuffled_input_workspace_ptr + 48, val3);
      shuffled_input_workspace_ptr += 64;
    }
#else
    for (c = 0; c < accum_depth; c += 16) {
      for (int b = 0; b < 4; b++) {
        const uint8* src_data_ptr = input_data + b * accum_depth + c;
        for (int j = 0; j < 16; j++) {
          uint8 src_val = *src_data_ptr++;
          // Flip the sign bit, so that the kernel will only need to
          // reinterpret these uint8 values as int8, getting for free the
          // subtraction of the zero_point value 128.
          uint8 dst_val = src_val ^ 0x80;
          *shuffled_input_workspace_ptr++ = dst_val;
        }
      }
    }
#endif
  } else {
    // Unsupported batch count.
    TFLITE_DCHECK(false);
    return;
  }
  static constexpr int kKernelRows = 4;
  const int thread_count = gemmlowp::HowManyThreads<kKernelRows>(
      gemmlowp_context->max_num_threads(), output_depth, batches, accum_depth);
  if (thread_count == 1) {
    // Single-thread case: do the computation on the current thread, don't
    // use a threadpool
    ShuffledFullyConnectedWorkerImpl(
        shuffled_input_workspace_data, int8_shuffled_weights_data, batches,
        output_depth, output_depth, accum_depth, bias_data, output_multiplier,
        output_shift, output_data);
    return;
  }
  // Multi-threaded case: use the gemmlowp context's threadpool.
  TFLITE_DCHECK_GT(thread_count, 1);
  // Tasks are heap-allocated here; LegacyExecuteAndDestroyTasks below takes
  // ownership and deletes them after running.
  std::vector<gemmlowp::Task*> tasks(thread_count);
  const int kRowsPerWorker = gemmlowp::RoundUp<kKernelRows>(
      gemmlowp::CeilQuotient(output_depth, thread_count));
  int row_start = 0;
  for (int i = 0; i < thread_count; i++) {
    int row_end = std::min(output_depth, row_start + kRowsPerWorker);
    tasks[i] = new LegacyShuffledFullyConnectedWorkerTask(
        shuffled_input_workspace_data,
        int8_shuffled_weights_data + row_start * accum_depth, batches,
        row_end - row_start, output_depth, accum_depth, bias_data + row_start,
        output_multiplier, output_shift, output_data + row_start);
    row_start = row_end;
  }
  TFLITE_DCHECK_EQ(row_start, output_depth);
  gemmlowp_context->workers_pool()->LegacyExecuteAndDestroyTasks(tasks);
}
inline void ShuffledFullyConnected(
const uint8* input_data, const Dims<4>& input_dims,
const uint8* shuffled_weights_data, const Dims<4>& weights_dims,
const int32* bias_data, const Dims<4>& bias_dims, int32 output_multiplier,
int output_shift, int32 output_activation_min, int32 output_activation_max,
int16* output_data, const Dims<4>& output_dims,
uint8* shuffled_input_workspace_data,
gemmlowp::GemmContext* gemmlowp_context) {
tflite::FullyConnectedParams op_params;
op_params.output_multiplier = output_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
ShuffledFullyConnected(op_params, DimsToShape(input_dims), input_data,
DimsToShape(weights_dims), shuffled_weights_data,
DimsToShape(bias_dims), bias_data,
DimsToShape(output_dims), output_data,
shuffled_input_workspace_data, gemmlowp_context);
}
// Legacy Dims<4> wrapper: converts the dims to RuntimeShapes and forwards
// all other arguments unchanged to the shape-based
// ExtractPatchIntoBufferColumn overload.
template <typename T>
inline void ExtractPatchIntoBufferColumn(
    const Dims<4>& input_dims, int w, int h, int b, int kheight, int kwidth,
    int stride_width, int stride_height, int pad_width, int pad_height,
    int in_width, int in_height, int in_depth, int single_buffer_length,
    int buffer_id, const T* in_data, T* conv_buffer_data, uint8 zero_byte) {
  ExtractPatchIntoBufferColumn(
      DimsToShape(input_dims), w, h, b, kheight, kwidth, stride_width,
      stride_height, pad_width, pad_height, in_width, in_height, in_depth,
      single_buffer_length, buffer_id, in_data, conv_buffer_data, zero_byte);
}
template <typename T>
void DilatedIm2col(const T* input_data, const Dims<4>& input_dims,
const Dims<4>& filter_dims, int stride_width,
int stride_height, int dilation_width_factor,
int dilation_height_factor, int pad_width, int pad_height,
const Dims<4>& output_dims, uint8 zero_byte,
T* im2col_data) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.dilation_width_factor = dilation_width_factor;
op_params.dilation_height_factor = dilation_height_factor;
DilatedIm2col(op_params, zero_byte, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), DimsToShape(output_dims),
im2col_data);
}
template <typename T>
void Im2col(const T* input_data, const Dims<4>& input_dims, int stride_width,
int stride_height, int pad_width, int pad_height, int kheight,
int kwidth, uint8 zero_byte, T* output_data,
const Dims<4>& output_dims) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.dilation_width_factor = 1;
op_params.dilation_height_factor = 1;
Im2col(op_params, kheight, kwidth, zero_byte, DimsToShape(input_dims),
input_data, DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
// Single-stride variant: applies the same stride to both width and height
// and forwards to the two-stride Im2col overload above.
template <typename T>
void Im2col(const T* input_data, const Dims<4>& input_dims, int stride,
            int pad_width, int pad_height, int kheight, int kwidth,
            uint8 zero_byte, T* output_data, const Dims<4>& output_dims) {
  Im2col(input_data, input_dims, stride, stride, pad_width, pad_height, kheight,
         kwidth, zero_byte, output_data, output_dims);
}
// Float convolution implemented as (optional) im2col followed by a GEMM.
// The GEMM runs on CBLAS when TF_LITE_USE_CBLAS is defined on Apple
// platforms, otherwise on Eigen.  Bias addition and activation clamping are
// applied afterwards by AddBiasAndEvalActivationFunction.
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const float* input_data, const RuntimeShape& filter_shape,
                 const float* filter_data, const RuntimeShape& bias_shape,
                 const float* bias_data, const RuntimeShape& output_shape,
                 float* output_data, const RuntimeShape& im2col_shape,
                 float* im2col_data) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  (void)im2col_data;
  (void)im2col_shape;
  ruy::profiler::ScopeLabel label("Conv");
  // NB: the float 0.0f value is represented by all zero bytes.
  const uint8 float_zero_byte = 0x00;
  const float* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;
  if (need_dilated_im2col) {
    // NOTE(review): unlike the plain im2col branch below, this path does not
    // DCHECK(im2col_data); DilatedIm2col still writes into it, so callers
    // must provide a buffer — confirm against call sites.
    DilatedIm2col(params, float_zero_byte, input_shape, input_data,
                  filter_shape, output_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    Im2col(params, filter_height, filter_width, float_zero_byte, input_shape,
           input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    // TODO(aselle): We need to make sure to not send im2col if it is not
    // needed.
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }
  // The following code computes matrix multiplication c = a * transpose(b)
  // with CBLAS, where:
  // * `a` is a matrix with dimensions (m, k).
  // * `b` is a matrix with dimensions (n, k), so transpose(b) is (k, n).
  // * `c` is a matrix with dimensions (m, n).
  // The naming of variables are aligned with CBLAS specification here.
  const float* a = gemm_input_data;
  const float* b = filter_data;
  float* c = output_data;
  const int gemm_input_dims = gemm_input_shape->DimensionsCount();
  int m = FlatSizeSkipDim(*gemm_input_shape, gemm_input_dims - 1);
  int n = output_shape.Dims(3);
  int k = gemm_input_shape->Dims(gemm_input_dims - 1);
#if defined(TF_LITE_USE_CBLAS) && defined(__APPLE__)
  // The stride of matrix a, b and c respectively.
  int stride_a = k;
  int stride_b = k;
  int stride_c = n;
  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, m, n, k, 1.0f, a,
              stride_a, b, stride_b, 0.0f, c, stride_c);
#else
  // When an optimized CBLAS implementation is not available, fall back
  // to using Eigen.
  typedef Eigen::Matrix<float, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>
      Matrix;
  typedef Eigen::Map<Matrix> MatrixRef;
  typedef Eigen::Map<const Matrix> ConstMatrixRef;
  MatrixRef matrix_c(c, m, n);
  ConstMatrixRef matrix_a(a, m, k);
  ConstMatrixRef matrix_b(b, n, k);
  // The following special casing for when a or b is a vector is required
  // as Eigen seem to fail to make this optimization on its own.
  if (n == 1) {
    // Single output channel: matrix * vector.
    ruy::profiler::ScopeLabel label("GEMV");
    matrix_c.col(0).noalias() = matrix_a * matrix_b.row(0).transpose();
  } else if (m == 1) {
    // Single input row: vector * matrix.
    ruy::profiler::ScopeLabel label("GEMV");
    matrix_c.row(0).noalias() = matrix_a.row(0) * matrix_b.transpose();
  } else {
    ruy::profiler::ScopeLabel label("GEMM");
    matrix_c.noalias() = matrix_a * matrix_b.transpose();
  }
#endif  // defined(TF_LITE_USE_CBLAS) && defined(__APPLE__)
  optimized_ops::AddBiasAndEvalActivationFunction(
      output_activation_min, output_activation_max, bias_shape, bias_data,
      output_shape, output_data);
}
inline void Conv(const float* input_data, const Dims<4>& input_dims,
const float* filter_data, const Dims<4>& filter_dims,
const float* bias_data, const Dims<4>& bias_dims,
int stride_width, int stride_height, int dilation_width_factor,
int dilation_height_factor, int pad_width, int pad_height,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims,
float* im2col_data, const Dims<4>& im2col_dims) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.dilation_width_factor = dilation_width_factor;
op_params.dilation_height_factor = dilation_height_factor;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Conv(op_params, DimsToShape(input_dims), input_data, DimsToShape(filter_dims),
filter_data, DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
output_data, DimsToShape(im2col_dims), im2col_data);
}
inline void HybridConv(const int8_t* input_data, const Dims<4>& input_dims,
const int8_t* filter_data, const Dims<4>& filter_dims,
const float* bias_data, const Dims<4>& bias_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, float* scaling_factors_ptr,
float output_activation_min, float output_activation_max,
int32_t* scratch_data, const Dims<4>& scratch_dims,
float* output_data, const Dims<4>& output_dims,
int8_t* im2col_data, const Dims<4>& im2col_dims,
CpuBackendContext* context) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
HybridConv(op_params, scaling_factors_ptr, DimsToShape(input_dims),
input_data, DimsToShape(filter_dims), filter_data,
DimsToShape(bias_dims), bias_data, DimsToShape(scratch_dims),
scratch_data, DimsToShape(output_dims), output_data,
DimsToShape(im2col_dims), im2col_data, context);
}
// Legacy templated entry point: derives the float activation clamp range
// from the fused activation type Ac, then forwards to the runtime-min/max
// float Conv overload above.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride_width,
          int stride_height, int dilation_width_factor,
          int dilation_height_factor, int pad_width, int pad_height,
          float* output_data, const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  Conv(input_data, input_dims, filter_data, filter_dims, bias_data, bias_dims,
       stride_width, stride_height, dilation_width_factor,
       dilation_height_factor, pad_width, pad_height, output_activation_min,
       output_activation_max, output_data, output_dims, im2col_data,
       im2col_dims);
}
// legacy, for compatibility with old checked-in code
// Non-dilated templated variant: fixes both dilation factors to 1 and
// forwards to the runtime-min/max float Conv overload.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride_width,
          int stride_height, int pad_width, int pad_height, float* output_data,
          const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  Conv(input_data, input_dims, filter_data, filter_dims, bias_data, bias_dims,
       stride_width, stride_height, 1, 1, pad_width, pad_height,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims);
}
// legacy, for compatibility with old checked-in code
// Single-stride, non-dilated templated variant: uses the same stride for
// width and height (and dilation 1) and forwards to the templated Conv<Ac>
// overload above.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride,
          int pad_width, int pad_height, float* output_data,
          const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  Conv<Ac>(input_data, input_dims, filter_data, filter_dims, bias_data,
           bias_dims, stride, stride, 1, 1, pad_width, pad_height, output_data,
           output_dims, im2col_data, im2col_dims);
}
// Quantized (uint8) convolution: (optional) im2col, then either a NEON
// GEMV fast path (single input column, >= 4 output rows) or a gemmlowp GEMM
// with the requantization output pipeline.
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const uint8* input_data, const RuntimeShape& filter_shape,
                 const uint8* filter_data, const RuntimeShape& bias_shape,
                 const int32* bias_data, const RuntimeShape& output_shape,
                 uint8* output_data, const RuntimeShape& im2col_shape,
                 uint8* im2col_data, gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label("Conv/8bit");
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const uint8* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;
  if (need_dilated_im2col) {
    TFLITE_DCHECK(im2col_data);
    // The im2col padding byte is the input zero point; it must fit in uint8.
    const int input_zero_point = -input_offset;
    TFLITE_DCHECK_GE(input_zero_point, 0);
    TFLITE_DCHECK_LE(input_zero_point, 255);
    DilatedIm2col(params, input_zero_point, input_shape, input_data,
                  filter_shape, output_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    // The im2col padding byte is the input zero point; it must fit in uint8.
    const int input_zero_point = -input_offset;
    TFLITE_DCHECK_GE(input_zero_point, 0);
    TFLITE_DCHECK_LE(input_zero_point, 255);
    Im2col(params, filter_height, filter_width, input_zero_point, input_shape,
           input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    // 1x1 filter with unit strides and dilation: feed the input directly.
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }
  const int gemm_input_rows = gemm_input_shape->Dims(3);
  // Using FlatSizeSkipDim causes segfault in some contexts (see b/79927784).
  // The root cause has not yet been identified though. Same applies below for
  // the other calls commented out. This is a partial rollback of cl/196819423.
  // const int gemm_input_cols = FlatSizeSkipDim(*gemm_input_shape, 3);
  const int gemm_input_cols = gemm_input_shape->Dims(0) *
                              gemm_input_shape->Dims(1) *
                              gemm_input_shape->Dims(2);
  const int filter_rows = filter_shape.Dims(0);
  // See b/79927784.
  // const int filter_cols = FlatSizeSkipDim(filter_shape, 0);
  const int filter_cols =
      filter_shape.Dims(1) * filter_shape.Dims(2) * filter_shape.Dims(3);
  const int output_rows = output_shape.Dims(3);
  // See b/79927784.
  // const int output_cols = FlatSizeSkipDim(output_shape, 3);
  const int output_cols =
      output_shape.Dims(0) * output_shape.Dims(1) * output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(output_cols, gemm_input_cols);
  TFLITE_DCHECK_EQ(filter_cols, gemm_input_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
#ifdef USE_NEON
  // Single input column degenerates the GEMM into a GEMV; use the dedicated
  // fully-connected GEMV kernel (requires >= 4 output rows).
  if (gemm_input_cols == 1 && output_rows >= 4) {
    RuntimeShape fc_filter_shape{
        filter_shape.Dims(0),
        filter_shape.Dims(filter_shape.DimensionsCount() - 1)};
    return FullyConnectedAsGEMV(
        *gemm_input_shape, gemm_input_data, input_offset, fc_filter_shape,
        filter_data, filter_offset, bias_shape, bias_data, output_offset,
        output_multiplier, output_shift, output_activation_min,
        output_activation_max, output_shape, output_data, gemmlowp_context);
  }
#endif
  // General case: gemmlowp GEMM with bias/requantization output pipeline.
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::RowMajor> filter_matrix(
      filter_data, filter_rows, filter_cols);
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::ColMajor> input_matrix(
      gemm_input_data, gemm_input_rows, gemm_input_cols);
  gemmlowp::MatrixMap<uint8, gemmlowp::MapOrder::ColMajor> output_matrix(
      output_data, output_rows, output_cols);
  const auto& output_pipeline = GemmlowpOutputPipeline::MakeExp(
      bias_data, output_rows, output_offset, output_multiplier, output_shift,
      output_activation_min, output_activation_max);
  gemmlowp::GemmWithOutputPipeline<uint8, uint8,
                                   gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
      gemmlowp_context, filter_matrix, input_matrix, &output_matrix,
      filter_offset, input_offset, output_pipeline);
}
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
int32 input_offset, const uint8* filter_data,
const Dims<4>& filter_dims, int32 filter_offset,
const int32* bias_data, const Dims<4>& bias_dims,
int stride_width, int stride_height, int dilation_width_factor,
int dilation_height_factor, int pad_width, int pad_height,
int32 output_offset, int32 output_multiplier, int output_shift,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims,
uint8* im2col_data, const Dims<4>& im2col_dims,
gemmlowp::GemmContext* gemmlowp_context) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
op_params.dilation_width_factor = dilation_width_factor;
op_params.dilation_height_factor = dilation_height_factor;
op_params.input_offset = input_offset;
op_params.weights_offset = filter_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Conv(op_params, DimsToShape(input_dims), input_data, DimsToShape(filter_dims),
filter_data, DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
output_data, DimsToShape(im2col_dims), im2col_data, gemmlowp_context);
}
// Legacy uint8 Conv overload without dilation arguments; forwards to the
// dilation-aware overload above with both dilation factors fixed to 1
// (i.e. an ordinary, undilated convolution).
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_offset, const uint8* filter_data,
                 const Dims<4>& filter_dims, int32 filter_offset,
                 const int32* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int32 output_offset, int32 output_multiplier,
                 int output_shift, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims, uint8* im2col_data,
                 const Dims<4>& im2col_dims,
                 gemmlowp::GemmContext* gemmlowp_context) {
  // The two literal 1s are dilation_width_factor / dilation_height_factor.
  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,
       filter_offset, bias_data, bias_dims, stride_width, stride_height, 1, 1,
       pad_width, pad_height, output_offset, output_multiplier, output_shift,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Templated legacy uint8 Conv: the fused activation kind is a compile-time
// parameter. The runtime activation bounds are still passed in; for kNone
// they are only sanity-checked (debug builds) to cover the full uint8 range.
template <FusedActivationFunctionType Ac>
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_offset, const uint8* filter_data,
                 const Dims<4>& filter_dims, int32 filter_offset,
                 const int32* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int32 output_offset, int32 output_multiplier,
                 int output_shift, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims, uint8* im2col_data,
                 const Dims<4>& im2col_dims,
                 gemmlowp::GemmContext* gemmlowp_context) {
  // Restrict Ac to the four supported fused-activation kinds.
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    // With no fused activation the bounds must span all of uint8.
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  // Ac carries no further information at runtime; forward to the
  // non-templated overload.
  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,
       filter_offset, bias_data, bias_dims, stride_width, stride_height,
       pad_width, pad_height, output_offset, output_multiplier, output_shift,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Oldest legacy uint8 Conv form: a single `stride` argument applies to both
// width and height. Forwards to the templated overload above.
template <FusedActivationFunctionType Ac>
void Conv(const uint8* input_data, const Dims<4>& input_dims,
          int32 input_offset, const uint8* filter_data,
          const Dims<4>& filter_dims, int32 filter_offset,
          const int32* bias_data, const Dims<4>& bias_dims, int stride,
          int pad_width, int pad_height, int32 output_offset,
          int32 output_multiplier, int output_shift,
          int32 output_activation_min, int32 output_activation_max,
          uint8* output_data, const Dims<4>& output_dims, uint8* im2col_data,
          const Dims<4>& im2col_dims, gemmlowp::GemmContext* gemmlowp_context) {
  // Restrict Ac to the four supported fused-activation kinds.
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  // Same stride in both spatial dimensions.
  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,
       filter_offset, bias_data, bias_dims, stride, stride, pad_width,
       pad_height, output_offset, output_multiplier, output_shift,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Legacy Im2col with a single `stride` applied to both dimensions.
// NOTE: the Ac template parameter is unused here; it exists only so old
// call sites that specified it keep compiling.
template <FusedActivationFunctionType Ac, typename T>
void Im2col(const T* input_data, const Dims<4>& input_dims, int stride,
            int pad_width, int pad_height, int kheight, int kwidth,
            uint8 zero_byte, T* output_data, const Dims<4>& output_dims) {
  // Forward with stride duplicated for width and height.
  Im2col(input_data, input_dims, stride, stride, pad_width, pad_height, kheight,
         kwidth, zero_byte, output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
// Float convolution expressed directly as a GEMM: output = filter^T * input,
// followed by bias addition and the fused activation Ac. No im2col is
// performed, so this is only valid for layouts where the conv reduces to a
// plain matrix product (presumably 1x1 kernels — confirm against callers).
template <FusedActivationFunctionType Ac>
void ConvAsGemm(const float* input_data, const Dims<4>& input_dims,
                const float* filter_data, const Dims<4>& filter_dims,
                const float* bias_data, const Dims<4>& bias_dims,
                float* output_data, const Dims<4>& output_dims) {
  ruy::profiler::ScopeLabel label("ConvAsGemm");
  // Map the raw buffers as Eigen matrices: input/output fold all but the
  // first dimension into columns; the filter folds all but the last
  // dimension into rows.
  const auto input_matrix_map =
      MapAsMatrixWithFirstDimAsRows(input_data, input_dims);
  const auto filter_matrix_map =
      MapAsMatrixWithLastDimAsCols(filter_data, filter_dims);
  auto output_matrix_map =
      MapAsMatrixWithFirstDimAsRows(output_data, output_dims);
  Gemm(filter_matrix_map.transpose(), input_matrix_map, &output_matrix_map);
  // Fused epilogue: add bias and apply the activation function Ac.
  AddBiasAndEvalActivationFunction<Ac>(bias_data, bias_dims, output_data,
                                       output_dims);
}
// legacy, for compatibility with old checked-in code
// Quantized (uint8) convolution expressed directly as a gemmlowp GEMM, with
// bias addition, requantization and saturation handled by the gemmlowp
// output pipeline. As with the float version, no im2col is performed.
// Note the shift sign: MakeExp receives -output_shift, i.e. this legacy
// entry point takes the old right-shift convention.
template <FusedActivationFunctionType Ac>
void ConvAsGemm(const uint8* input_data, const Dims<4>& input_dims,
                int32 input_offset, const uint8* filter_data,
                const Dims<4>& filter_dims, int32 filter_offset,
                const int32* bias_data, const Dims<4>& bias_dims,
                int32 output_offset, int32 output_multiplier, int output_shift,
                int32 output_activation_min, int32 output_activation_max,
                uint8* output_data, const Dims<4>& output_dims,
                gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label("ConvAsGemm/8bit");
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  // Fold the 4D shapes into 2D GEMM dimensions and check that they are
  // mutually consistent: output = filter(filter_rows x filter_cols)
  // * input(input_rows x input_cols).
  const int input_rows = input_dims.sizes[0];
  const int input_cols = FlatSizeSkipDim(input_dims, 0);
  const int filter_rows = filter_dims.sizes[3];
  const int filter_cols = FlatSizeSkipDim(filter_dims, 3);
  const int output_rows = output_dims.sizes[0];
  const int output_cols = FlatSizeSkipDim(output_dims, 0);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(output_cols, input_cols);
  TFLITE_DCHECK_EQ(filter_cols, input_rows);
  // Bias must be a plain vector of length output_rows.
  TFLITE_DCHECK_EQ(bias_dims.sizes[0], output_rows);
  TFLITE_DCHECK_EQ(bias_dims.sizes[1], 1);
  TFLITE_DCHECK_EQ(bias_dims.sizes[2], 1);
  TFLITE_DCHECK_EQ(bias_dims.sizes[3], 1);
  // gemmlowp matrix maps; the fourth argument is the explicit stride.
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::RowMajor> filter_matrix(
      filter_data, output_rows, filter_cols, filter_cols);
  gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::ColMajor> input_matrix(
      input_data, filter_cols, output_cols, filter_cols);
  gemmlowp::MatrixMap<uint8, gemmlowp::MapOrder::ColMajor> output_matrix(
      output_data, output_rows, output_cols, output_rows);
  // Output pipeline: bias add, fixed-point requantization, clamping to
  // [output_activation_min, output_activation_max], cast to uint8.
  const auto& output_pipeline = GemmlowpOutputPipeline::MakeExp(
      bias_data, output_rows, output_offset, output_multiplier, -output_shift,
      output_activation_min, output_activation_max);
  gemmlowp::GemmWithOutputPipeline<uint8, uint8,
                                   gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
      gemmlowp_context, filter_matrix, input_matrix, &output_matrix,
      filter_offset, input_offset, output_pipeline);
}
// Float transpose convolution implemented as TransposeIm2col followed by a
// single GEMM. The im2col scratch buffer is mandatory (DCHECKed below).
inline void TransposeConv(
    const ConvParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& output_shape,
    float* output_data, const RuntimeShape& im2col_shape, float* im2col_data) {
  ruy::profiler::ScopeLabel label("TransposeConv");
  // Note we could use transposed weights with forward conv for unstrided
  // cases. But we are already getting good performance with this code as-is.
  TFLITE_DCHECK(im2col_data);
  // Scatter the input into im2col layout; 0 is the zero_byte (float path).
  TransposeIm2col(params, 0, input_shape, input_data, filter_shape,
                  output_shape, im2col_data);
  // output = filter^T * im2col, computed as one dense matrix product.
  const auto im2col_matrix_map =
      MapAsMatrixWithLastDimAsRows(im2col_data, im2col_shape);
  const auto filter_matrix_map =
      MapAsMatrixWithFirstDimAsCols(filter_data, filter_shape);
  auto output_matrix_map =
      MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  Gemm(filter_matrix_map.transpose(), im2col_matrix_map, &output_matrix_map);
}
inline void TransposeConv(const float* input_data, const Dims<4>& input_dims,
const float* filter_data, const Dims<4>& filter_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, float* output_data,
const Dims<4>& output_dims, float* im2col_data,
const Dims<4>& im2col_dims) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
TransposeConv(op_params, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), filter_data, DimsToShape(output_dims),
output_data, DimsToShape(im2col_dims), im2col_data);
}
// Bias-less TransposeConvV2 convenience overload: forwards to the full
// overload with an empty bias shape and a null bias pointer.
inline void TransposeConvV2(
    const ConvParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& hwoi_ordered_filter_shape,
    const float* hwoi_ordered_filter_data, const RuntimeShape& output_shape,
    float* output_data, const RuntimeShape& col2im_shape, float* col2im_data,
    CpuBackendContext* cpu_backend_context) {
  TransposeConvV2(params, input_shape, input_data, hwoi_ordered_filter_shape,
                  hwoi_ordered_filter_data, /*bias_shape*/ RuntimeShape(),
                  /*bias_data*/ nullptr, output_shape, output_data,
                  col2im_shape, col2im_data, cpu_backend_context);
}
template <typename T>
void TransposeIm2col(const T* input_data, const Dims<4>& input_dims,
const Dims<4>& filter_dims, int stride_width,
int stride_height, int pad_width, int pad_height,
const Dims<4>& output_dims, uint8 zero_byte,
T* im2col_data) {
tflite::ConvParams op_params;
// Padding type is ignored, but still set.
op_params.padding_type = PaddingType::kSame;
op_params.padding_values.width = pad_width;
op_params.padding_values.height = pad_height;
op_params.stride_width = stride_width;
op_params.stride_height = stride_height;
TransposeIm2col(op_params, zero_byte, DimsToShape(input_dims), input_data,
DimsToShape(filter_dims), DimsToShape(output_dims),
im2col_data);
}
// Float LSTM cell. All shapes are extended to rank 4. The four gates are
// computed in one fused pass: Concatenation of [input, prev_activ] along
// depth, then a single FullyConnected producing activ_temp whose depth is
// 4 * output_depth; the result is split four ways (input gate, new input,
// forget gate, output gate) and combined with Eigen array expressions.
inline void LstmCell(
    const LstmCellParams& params, const RuntimeShape& unextended_input_shape,
    const float* input_data, const RuntimeShape& unextended_prev_activ_shape,
    const float* prev_activ_data, const RuntimeShape& weights_shape,
    const float* weights_data, const RuntimeShape& unextended_bias_shape,
    const float* bias_data, const RuntimeShape& unextended_prev_state_shape,
    const float* prev_state_data,
    const RuntimeShape& unextended_output_state_shape, float* output_state_data,
    const RuntimeShape& unextended_output_activ_shape, float* output_activ_data,
    const RuntimeShape& unextended_concat_temp_shape, float* concat_temp_data,
    const RuntimeShape& unextended_activ_temp_shape, float* activ_temp_data) {
  ruy::profiler::ScopeLabel label("LstmCell");
  // All shapes must be at most rank 4; extend them to exactly rank 4.
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape prev_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape);
  const RuntimeShape bias_shape =
      RuntimeShape::ExtendedShape(4, unextended_bias_shape);
  const RuntimeShape prev_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_state_shape);
  const RuntimeShape output_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_state_shape);
  const RuntimeShape output_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_activ_shape);
  const RuntimeShape concat_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape);
  const RuntimeShape activ_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  const int weights_dim_count = weights_shape.DimensionsCount();
  // Consistency checks: batches / height / width must agree across all
  // activation and state tensors (return values intentionally discarded;
  // MatchingDim DCHECKs equality).
  MatchingDim(  // batches
      input_shape, 0, prev_activ_shape, 0, prev_state_shape, 0,
      output_state_shape, 0, output_activ_shape, 0);
  MatchingDim(  // height
      input_shape, 1, prev_activ_shape, 1, prev_state_shape, 1,
      output_state_shape, 1, output_activ_shape, 1);
  MatchingDim(  // width
      input_shape, 2, prev_activ_shape, 2, prev_state_shape, 2,
      output_state_shape, 2, output_activ_shape, 2);
  const int input_depth = input_shape.Dims(3);
  const int prev_activ_depth = prev_activ_shape.Dims(3);
  const int total_input_depth = prev_activ_depth + input_depth;
  TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1),
                   total_input_depth);
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1);
  // intern_activ_depth is 4 * output_depth: one slice per LSTM gate.
  const int intern_activ_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3);
  TFLITE_DCHECK_EQ(weights_shape.FlatSize(),
                   intern_activ_depth * total_input_depth);
  TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0);
  const int output_depth =
      MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape,
                  3, output_activ_shape, 3);
  TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4);
  // Concatenate prev_activ and input data together
  std::vector<float const*> concat_input_arrays_data;
  std::vector<RuntimeShape const*> concat_input_arrays_shapes;
  concat_input_arrays_data.push_back(input_data);
  concat_input_arrays_data.push_back(prev_activ_data);
  concat_input_arrays_shapes.push_back(&input_shape);
  concat_input_arrays_shapes.push_back(&prev_activ_shape);
  tflite::ConcatenationParams concat_params;
  concat_params.axis = 3;
  concat_params.inputs_count = concat_input_arrays_data.size();
  Concatenation(concat_params, &(concat_input_arrays_shapes[0]),
                &(concat_input_arrays_data[0]), concat_temp_shape,
                concat_temp_data);
  // Fully connected
  tflite::FullyConnectedParams fc_params;
  // No fused activation: bounds span the whole float range.
  fc_params.float_activation_min = std::numeric_limits<float>::lowest();
  fc_params.float_activation_max = std::numeric_limits<float>::max();
  FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape,
                 weights_data, bias_shape, bias_data, activ_temp_shape,
                 activ_temp_data);
  // Map raw arrays to Eigen arrays so we can use Eigen's optimized array
  // operations.
  ArrayMap<float> activ_temp_map =
      MapAsArrayWithLastDimAsRows(activ_temp_data, activ_temp_shape);
  // Slice activ_temp into the four gate pre-activations, in this fixed
  // order: input gate, new input (cell candidate), forget gate, output gate.
  auto input_gate_sm = activ_temp_map.block(0 * output_depth, 0, output_depth,
                                            activ_temp_map.cols());
  auto new_input_sm = activ_temp_map.block(1 * output_depth, 0, output_depth,
                                           activ_temp_map.cols());
  auto forget_gate_sm = activ_temp_map.block(2 * output_depth, 0, output_depth,
                                             activ_temp_map.cols());
  auto output_gate_sm = activ_temp_map.block(3 * output_depth, 0, output_depth,
                                             activ_temp_map.cols());
  ArrayMap<const float> prev_state_map =
      MapAsArrayWithLastDimAsRows(prev_state_data, prev_state_shape);
  ArrayMap<float> output_state_map =
      MapAsArrayWithLastDimAsRows(output_state_data, output_state_shape);
  ArrayMap<float> output_activ_map =
      MapAsArrayWithLastDimAsRows(output_activ_data, output_activ_shape);
  // Combined memory state and final output calculation
  ruy::profiler::ScopeLabel label2("MemoryStateAndFinalOutput");
  // new_state = sigmoid(input_gate) * tanh(new_input)
  //           + sigmoid(forget_gate) * prev_state
  output_state_map =
      input_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
          new_input_sm.tanh() +
      forget_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
          prev_state_map;
  // output_activ = sigmoid(output_gate) * tanh(new_state)
  output_activ_map =
      output_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
      output_state_map.tanh();
}
// Legacy float LstmCell taking Dims<4> shapes; converts the shapes and
// forwards to the RuntimeShape-based implementation above.
inline void LstmCell(const float* input_data, const Dims<4>& input_dims,
                     const float* prev_activ_data,
                     const Dims<4>& prev_activ_dims, const float* weights_data,
                     const Dims<4>& weights_dims, const float* bias_data,
                     const Dims<4>& bias_dims, const float* prev_state_data,
                     const Dims<4>& prev_state_dims, float* output_state_data,
                     const Dims<4>& output_state_dims, float* output_activ_data,
                     const Dims<4>& output_activ_dims, float* concat_temp_data,
                     const Dims<4>& concat_temp_dims, float* activ_temp_data,
                     const Dims<4>& activ_temp_dims) {
  tflite::LstmCellParams op_params;
  // Float LSTM cell does not need parameters to be set: leave untouched.
  LstmCell(op_params, DimsToShape(input_dims), input_data,
           DimsToShape(prev_activ_dims), prev_activ_data,
           DimsToShape(weights_dims), weights_data, DimsToShape(bias_dims),
           bias_data, DimsToShape(prev_state_dims), prev_state_data,
           DimsToShape(output_state_dims), output_state_data,
           DimsToShape(output_activ_dims), output_activ_data,
           DimsToShape(concat_temp_dims), concat_temp_data,
           DimsToShape(activ_temp_dims), activ_temp_data);
}
// Quantized LSTM cell: uint8 external activations/weights, int16 fixed-point
// internal state. StateIntegerBits selects the number of integer bits of the
// internal state representation (range [-2^StateIntegerBits,
// 2^StateIntegerBits]). The gate pre-activations are produced by one uint8
// GEMM (or a NEON GEMV fast path) into activ_temp as 16-bit fixed-point with
// 3 integer bits; the gate math is then done in 16-bit fixed-point, with a
// NEON path processing 8 lanes per iteration and a scalar tail.
template <int StateIntegerBits>
inline void LstmCell(
    const LstmCellParams& params, const RuntimeShape& unextended_input_shape,
    const uint8* input_data_uint8,
    const RuntimeShape& unextended_prev_activ_shape,
    const uint8* prev_activ_data_uint8, const RuntimeShape& weights_shape,
    const uint8* weights_data_uint8, const RuntimeShape& unextended_bias_shape,
    const int32* bias_data_int32,
    const RuntimeShape& unextended_prev_state_shape,
    const int16* prev_state_data_int16,
    const RuntimeShape& unextended_output_state_shape,
    int16* output_state_data_int16,
    const RuntimeShape& unextended_output_activ_shape,
    uint8* output_activ_data_uint8,
    const RuntimeShape& unextended_concat_temp_shape,
    uint8* concat_temp_data_uint8,
    const RuntimeShape& unextended_activ_temp_shape,
    int16* activ_temp_data_int16, gemmlowp::GemmContext* gemmlowp_context) {
  ruy::profiler::ScopeLabel label(
      "LstmCell/quantized (8bit external, 16bit internal)");
  // Quantization parameters for the fully-connected accumulator rescale.
  int32 weights_zero_point = params.weights_zero_point;
  int32 accum_multiplier = params.accum_multiplier;
  int accum_shift = params.accum_shift;
  // All shapes must be at most rank 4; extend them to exactly rank 4.
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape prev_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape);
  const RuntimeShape bias_shape =
      RuntimeShape::ExtendedShape(4, unextended_bias_shape);
  const RuntimeShape prev_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_state_shape);
  const RuntimeShape output_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_state_shape);
  const RuntimeShape output_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_activ_shape);
  const RuntimeShape concat_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape);
  const RuntimeShape activ_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  // Gather dimensions information, and perform consistency checks.
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int outer_size = MatchingFlatSizeSkipDim(
      input_shape, 3, prev_activ_shape, prev_state_shape, output_state_shape,
      output_activ_shape);
  const int input_depth = input_shape.Dims(3);
  const int prev_activ_depth = prev_activ_shape.Dims(3);
  const int total_input_depth = prev_activ_depth + input_depth;
  TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1),
                   total_input_depth);
  // intern_activ_depth is 4 * output_depth: one slice per LSTM gate.
  const int intern_activ_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3);
  TFLITE_DCHECK_EQ(weights_shape.FlatSize(),
                   intern_activ_depth * total_input_depth);
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1);
  TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0);
  const int output_depth =
      MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape,
                  3, output_activ_shape, 3);
  TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4);
  // Dimensions of the fully-connected (GEMM) step.
  const int fc_batches = FlatSizeSkipDim(activ_temp_shape, 3);
  const int fc_output_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, activ_temp_shape, 3);
  const int fc_accum_depth = total_input_depth;
  TFLITE_DCHECK_EQ(fc_output_depth, 4 * output_depth);
  // Depth-concatenate prev_activ and input data together.
  uint8 const* concat_input_arrays_data[2] = {input_data_uint8,
                                              prev_activ_data_uint8};
  const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape,
                                                       &prev_activ_shape};
  tflite::ConcatenationParams concat_params;
  concat_params.axis = 3;
  concat_params.inputs_count = 2;
  Concatenation(concat_params, concat_input_arrays_shapes,
                concat_input_arrays_data, concat_temp_shape,
                concat_temp_data_uint8);
  // Implementation of the fully connected node inside the LSTM cell.
  // The operands are 8-bit integers, the accumulators are internally 32bit
  // integers, and the output is 16-bit fixed-point with 3 integer bits so
  // the output range is [-2^3, 2^3] == [-8, 8]. The rationale for that
  // is explained in the function comment above.
  bool gemm_already_performed = false;
#ifdef GEMMLOWP_NEON
  // NEON fast path: a specialized GEMV when there is a single batch and the
  // depths meet the required alignment.
  if (fc_batches == 1 && !(fc_output_depth % 4) && !(fc_accum_depth % 8)) {
    GEMVForLstmCell(concat_temp_shape, concat_temp_data_uint8, weights_shape,
                    weights_data_uint8, weights_zero_point, bias_shape,
                    bias_data_int32, accum_multiplier, accum_shift,
                    activ_temp_shape, activ_temp_data_int16);
    gemm_already_performed = true;
  }
#endif
  if (!gemm_already_performed) {
    // Generic gemmlowp path: uint8 GEMM with a bias-add, fixed-point
    // rescale, and saturating-cast-to-int16 output pipeline.
    gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::RowMajor>
        weights_matrix(weights_data_uint8, fc_output_depth, fc_accum_depth);
    gemmlowp::MatrixMap<const uint8, gemmlowp::MapOrder::ColMajor> input_matrix(
        concat_temp_data_uint8, fc_accum_depth, fc_batches);
    gemmlowp::MatrixMap<int16, gemmlowp::MapOrder::ColMajor> output_matrix(
        activ_temp_data_int16, fc_output_depth, fc_batches);
    typedef gemmlowp::VectorMap<const int32, gemmlowp::VectorShape::Col>
        ColVectorMap;
    ColVectorMap bias_vector(bias_data_int32, fc_output_depth);
    gemmlowp::OutputStageBiasAddition<ColVectorMap> bias_addition_stage;
    bias_addition_stage.bias_vector = bias_vector;
    gemmlowp::OutputStageScaleInt32ByFixedPointAndExponent scale_stage;
    scale_stage.result_offset_after_shift = 0;
    scale_stage.result_fixedpoint_multiplier = accum_multiplier;
    scale_stage.result_exponent = accum_shift;
    gemmlowp::OutputStageSaturatingCastToInt16 saturating_cast_int16_stage;
    auto output_pipeline = std::make_tuple(bias_addition_stage, scale_stage,
                                           saturating_cast_int16_stage);
    // -128 is the input's zero point (uint8 data re-centered to signed).
    gemmlowp::GemmWithOutputPipeline<
        uint8, int16, gemmlowp::L8R8WithLhsNonzeroBitDepthParams>(
        gemmlowp_context, weights_matrix, input_matrix, &output_matrix,
        -weights_zero_point, -128, output_pipeline);
  }
  // Rest of the LSTM cell: tanh and logistic math functions, and some adds
  // and muls, all done in 16-bit fixed-point.
  // The four gate slices are interleaved per batch in activ_temp, in this
  // fixed order: input gate, input modulation, forget gate, output gate.
  const int16* input_gate_input_ptr = activ_temp_data_int16;
  const int16* input_modulation_gate_input_ptr =
      activ_temp_data_int16 + output_depth;
  const int16* forget_gate_input_ptr = activ_temp_data_int16 + 2 * output_depth;
  const int16* output_gate_input_ptr = activ_temp_data_int16 + 3 * output_depth;
  const int16* prev_state_ptr = prev_state_data_int16;
  int16* output_state_data_ptr = output_state_data_int16;
  uint8* output_activ_data_ptr = output_activ_data_uint8;
  for (int b = 0; b < outer_size; ++b) {
    int c = 0;
#ifdef GEMMLOWP_NEON
    for (; c <= output_depth - 8; c += 8) {
      // Define the fixed-point data types that we will use here. All use
      // int16 as the underlying integer type i.e. all are 16-bit fixed-point.
      // They only differ by the number of integral vs. fractional bits,
      // determining the range of values that they can represent.
      //
      // F0 uses 0 integer bits, range [-1, 1].
      // This is the return type of math functions such as tanh, logistic,
      // whose range is in [-1, 1].
      using F0 = gemmlowp::FixedPoint<int16x8_t, 0>;
      // F3 uses 3 integer bits, range [-8, 8].
      // This is the range of the previous fully-connected node's output,
      // which is our input here.
      using F3 = gemmlowp::FixedPoint<int16x8_t, 3>;
      // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits,
      // 2^StateIntegerBits]. It's used to represent the internal state, whose
      // number of integer bits is currently dictated by the model. See comment
      // on the StateIntegerBits template parameter above.
      using FS = gemmlowp::FixedPoint<int16x8_t, StateIntegerBits>;
      // Implementation of input gate, using fixed-point logistic function.
      F3 input_gate_input = F3::FromRaw(vld1q_s16(input_gate_input_ptr));
      input_gate_input_ptr += 8;
      F0 input_gate_output = gemmlowp::logistic(input_gate_input);
      // Implementation of input modulation gate, using fixed-point tanh
      // function.
      F3 input_modulation_gate_input =
          F3::FromRaw(vld1q_s16(input_modulation_gate_input_ptr));
      input_modulation_gate_input_ptr += 8;
      F0 input_modulation_gate_output =
          gemmlowp::tanh(input_modulation_gate_input);
      // Implementation of forget gate, using fixed-point logistic function.
      F3 forget_gate_input = F3::FromRaw(vld1q_s16(forget_gate_input_ptr));
      forget_gate_input_ptr += 8;
      F0 forget_gate_output = gemmlowp::logistic(forget_gate_input);
      // Implementation of output gate, using fixed-point logistic function.
      F3 output_gate_input = F3::FromRaw(vld1q_s16(output_gate_input_ptr));
      output_gate_input_ptr += 8;
      F0 output_gate_output = gemmlowp::logistic(output_gate_input);
      // Implementation of internal multiplication nodes, still in fixed-point.
      F0 input_times_input_modulation =
          input_gate_output * input_modulation_gate_output;
      FS prev_state = FS::FromRaw(vld1q_s16(prev_state_ptr));
      prev_state_ptr += 8;
      FS prev_state_times_forget_state = forget_gate_output * prev_state;
      // Implementation of internal addition node, saturating.
      FS new_state = gemmlowp::SaturatingAdd(
          gemmlowp::Rescale<StateIntegerBits>(input_times_input_modulation),
          prev_state_times_forget_state);
      // Implementation of last internal Tanh node, still in fixed-point.
      // Since a Tanh fixed-point implementation is specialized for a given
      // number or integer bits, and each specialization can have a substantial
      // code size, and we already used above a Tanh on an input with 3 integer
      // bits, and per the table in the above function comment there is no
      // significant accuracy to be lost by clamping to [-8, +8] for a
      // 3-integer-bits representation, let us just do that. This helps people
      // porting this to targets where code footprint must be minimized.
      F3 new_state_f3 = gemmlowp::Rescale<3>(new_state);
      F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3);
      // Store the new internal state back to memory, as 16-bit integers.
      // Note: here we store the original value with StateIntegerBits, not
      // the rescaled 3-integer-bits value fed to tanh.
      vst1q_s16(output_state_data_ptr, new_state.raw());
      output_state_data_ptr += 8;
      // Down-scale the output activations to 8-bit integers, saturating,
      // and store back to memory.
      int16x8_t rescaled_output_activ =
          gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8);
      int8x8_t int8_output_activ = vqmovn_s16(rescaled_output_activ);
      uint8x8_t uint8_output_activ =
          vadd_u8(vdup_n_u8(128), vreinterpret_u8_s8(int8_output_activ));
      vst1_u8(output_activ_data_ptr, uint8_output_activ);
      output_activ_data_ptr += 8;
    }
#endif
    // Scalar path: handles the full depth when NEON is unavailable, or the
    // remaining (output_depth % 8) lanes after the NEON loop.
    for (; c < output_depth; ++c) {
      // Define the fixed-point data types that we will use here. All use
      // int16 as the underlying integer type i.e. all are 16-bit fixed-point.
      // They only differ by the number of integral vs. fractional bits,
      // determining the range of values that they can represent.
      //
      // F0 uses 0 integer bits, range [-1, 1].
      // This is the return type of math functions such as tanh, logistic,
      // whose range is in [-1, 1].
      using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
      // F3 uses 3 integer bits, range [-8, 8].
      // This is the range of the previous fully-connected node's output,
      // which is our input here.
      using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
      // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits,
      // 2^StateIntegerBits]. It's used to represent the internal state, whose
      // number of integer bits is currently dictated by the model. See comment
      // on the StateIntegerBits template parameter above.
      using FS = gemmlowp::FixedPoint<std::int16_t, StateIntegerBits>;
      // Implementation of input gate, using fixed-point logistic function.
      F3 input_gate_input = F3::FromRaw(*input_gate_input_ptr++);
      F0 input_gate_output = gemmlowp::logistic(input_gate_input);
      // Implementation of input modulation gate, using fixed-point tanh
      // function.
      F3 input_modulation_gate_input =
          F3::FromRaw(*input_modulation_gate_input_ptr++);
      F0 input_modulation_gate_output =
          gemmlowp::tanh(input_modulation_gate_input);
      // Implementation of forget gate, using fixed-point logistic function.
      F3 forget_gate_input = F3::FromRaw(*forget_gate_input_ptr++);
      F0 forget_gate_output = gemmlowp::logistic(forget_gate_input);
      // Implementation of output gate, using fixed-point logistic function.
      F3 output_gate_input = F3::FromRaw(*output_gate_input_ptr++);
      F0 output_gate_output = gemmlowp::logistic(output_gate_input);
      // Implementation of internal multiplication nodes, still in fixed-point.
      F0 input_times_input_modulation =
          input_gate_output * input_modulation_gate_output;
      FS prev_state = FS::FromRaw(*prev_state_ptr++);
      FS prev_state_times_forget_state = forget_gate_output * prev_state;
      // Implementation of internal addition node, saturating.
      FS new_state = gemmlowp::SaturatingAdd(
          gemmlowp::Rescale<StateIntegerBits>(input_times_input_modulation),
          prev_state_times_forget_state);
      // Implementation of last internal Tanh node, still in fixed-point.
      // Since a Tanh fixed-point implementation is specialized for a given
      // number or integer bits, and each specialization can have a substantial
      // code size, and we already used above a Tanh on an input with 3 integer
      // bits, and per the table in the above function comment there is no
      // significant accuracy to be lost by clamping to [-8, +8] for a
      // 3-integer-bits representation, let us just do that. This helps people
      // porting this to targets where code footprint must be minimized.
      F3 new_state_f3 = gemmlowp::Rescale<3>(new_state);
      F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3);
      // Store the new internal state back to memory, as 16-bit integers.
      // Note: here we store the original value with StateIntegerBits, not
      // the rescaled 3-integer-bits value fed to tanh.
      *output_state_data_ptr++ = new_state.raw();
      // Down-scale the output activations to 8-bit integers, saturating,
      // and store back to memory.
      int16 rescaled_output_activ =
          gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8);
      int16 clamped_output_activ =
          std::max<int16>(-128, std::min<int16>(127, rescaled_output_activ));
      *output_activ_data_ptr++ = 128 + clamped_output_activ;
    }
    // Advance the gate pointers past the other three gate slices of this
    // batch, to the start of the same gate's slice in the next batch.
    input_gate_input_ptr += 3 * output_depth;
    input_modulation_gate_input_ptr += 3 * output_depth;
    forget_gate_input_ptr += 3 * output_depth;
    output_gate_input_ptr += 3 * output_depth;
  }
}
template <int StateIntegerBits>
void LstmCell(const uint8* input_data_uint8, const Dims<4>& input_dims,
const uint8* prev_activ_data_uint8,
const Dims<4>& prev_activ_dims, const uint8* weights_data_uint8,
const Dims<4>& weights_dims, const int32* bias_data_int32,
const Dims<4>& bias_dims, const int16* prev_state_data_int16,
const Dims<4>& prev_state_dims, int16* output_state_data_int16,
const Dims<4>& output_state_dims, uint8* output_activ_data_uint8,
const Dims<4>& output_activ_dims, uint8* concat_temp_data_uint8,
const Dims<4>& concat_temp_dims, int16* activ_temp_data_int16,
const Dims<4>& activ_temp_dims, int32 weights_zero_point,
int32 accum_multiplier, int accum_shift,
gemmlowp::GemmContext* gemmlowp_context) {
tflite::LstmCellParams op_params;
op_params.weights_zero_point = weights_zero_point;
op_params.accum_multiplier = accum_multiplier;
op_params.accum_shift = accum_shift;
LstmCell<StateIntegerBits>(
op_params, DimsToShape(input_dims), input_data_uint8,
DimsToShape(prev_activ_dims), prev_activ_data_uint8,
DimsToShape(weights_dims), weights_data_uint8, DimsToShape(bias_dims),
bias_data_int32, DimsToShape(prev_state_dims), prev_state_data_int16,
DimsToShape(output_state_dims), output_state_data_int16,
DimsToShape(output_activ_dims), output_activ_data_uint8,
DimsToShape(concat_temp_dims), concat_temp_data_uint8,
DimsToShape(activ_temp_dims), activ_temp_data_int16, gemmlowp_context);
}
template <typename T>
void BroadcastDiv(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
BroadcastDivSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
void L2Normalization(const float* input_data, const RuntimeShape& input_shape,
float* output_data, const RuntimeShape& output_shape) {
static_assert(Ac == FusedActivationFunctionType::kNone, "");
tflite::L2NormalizationParams op_params;
// No params need to be set for float, but reserved in signature for future
// activations.
L2Normalization(op_params, input_shape, input_data, output_shape,
output_data);
}
inline void L2Normalization(const uint8* input_data,
const RuntimeShape& input_shape,
int32 input_zero_point, uint8* output_data,
const RuntimeShape& output_shape) {
tflite::L2NormalizationParams op_params;
op_params.input_zero_point = input_zero_point;
L2Normalization(op_params, input_shape, input_data, output_shape,
output_data);
}
template <FusedActivationFunctionType Ac>
void L2Normalization(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
L2Normalization<Ac>(input_data, DimsToShape(input_dims), output_data,
DimsToShape(output_dims));
}
inline void L2Normalization(const uint8* input_data, const Dims<4>& input_dims,
int32 input_zero_point, uint8* output_data,
const Dims<4>& output_dims) {
L2Normalization(input_data, DimsToShape(input_dims), input_zero_point,
output_data, DimsToShape(output_dims));
}
inline void Relu(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Relu(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void Add(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac>
inline void Add(int left_shift, const uint8* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const uint8* input2_data, const Dims<4>& input2_dims,
int32 input2_offset, int32 input2_multiplier, int input2_shift,
int32 output_offset, int32 output_multiplier, int output_shift,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac>
void Add(const int32* input1_data, const Dims<4>& input1_dims,
const int32* input2_data, const Dims<4>& input2_dims,
int32* output_data, const Dims<4>& output_dims) {
ruy::profiler::ScopeLabel label("Add/int32");
TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = std::numeric_limits<int32>::min();
op_params.quantized_activation_max = std::numeric_limits<int32>::max();
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
inline void BroadcastAdd(int left_shift, const uint8* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const uint8* input2_data, const Dims<4>& input2_dims,
int32 input2_offset, int32 input2_multiplier,
int input2_shift, int32 output_offset,
int32 output_multiplier, int output_shift,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
inline void BroadcastAddFivefold(
int y0, int y1, int y2, int y3, int y4, int left_shift,
const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift, const uint8* input2_data,
const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
int input2_shift, int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.broadcast_category =
tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
op_params.broadcast_shape[4] = y0;
op_params.broadcast_shape[3] = y1;
op_params.broadcast_shape[2] = y2;
op_params.broadcast_shape[1] = y3;
op_params.broadcast_shape[0] = y4;
BroadcastAddFivefold(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac, typename T>
void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
                  const T* input2_data, const Dims<4>& input2_dims,
                  T* output_data, const Dims<4>& output_dims) {
  // Derive the clamp bounds from the fused activation, then delegate.
  T act_min, act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims, act_min,
               act_max, output_data, output_dims);
}
template <FusedActivationFunctionType Ac>
inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
int input1_shift, const int16* input2_data,
const Dims<4>& input2_dims, int input2_shift,
int16 output_activation_min, int16 output_activation_max,
int16* output_data, const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, -32768);
TFLITE_DCHECK_EQ(output_activation_max, 32767);
}
tflite::ArithmeticParams op_params;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(FusedActivationFunctionType::kNone,
&output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Sub(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
const Dims<4>& input2_dims, T* output_data,
const Dims<4>& output_dims) {
T output_activation_min, output_activation_max;
GetActivationMinMax(FusedActivationFunctionType::kNone,
&output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Sub(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
int32 input1_offset, const uint8* input2_data,
const Dims<4>& input2_dims, int32 input2_offset,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
op_params.input1_offset = input1_offset;
op_params.input2_offset = input2_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
                         int32 input1_offset, const uint8* input2_data,
                         const Dims<4>& input2_dims, int32 input2_offset,
                         int32 output_offset, int32 output_multiplier,
                         int output_shift, int32 output_activation_min,
                         int32 output_activation_max, uint8* output_data,
                         const Dims<4>& output_dims) {
  // The fused-activation template argument is unused here; the explicit
  // bounds already encode the activation. Delegate to the untemplated
  // overload.
  BroadcastMul(input1_data, input1_dims, input1_offset, input2_data,
               input2_dims, input2_offset, output_offset, output_multiplier,
               output_shift, output_activation_min, output_activation_max,
               output_data, output_dims);
}
inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = kheight;
params.filter_width = kwidth;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int kwidth, int kheight, float* output_data,
                 const Dims<4>& output_dims) {
  // Map the fused activation onto explicit clamp bounds and delegate.
  float act_min, act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
              pad_height, kwidth, kheight, act_min, act_max, output_data,
              output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, float* output_data,
                 const Dims<4>& output_dims) {
  // A single stride argument applies to both spatial dimensions.
  AveragePool<Ac>(input_data, input_dims, /*stride_width=*/stride,
                  /*stride_height=*/stride, pad_width, pad_height,
                  filter_width, filter_height, output_data, output_dims);
}
inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = filter_height;
params.filter_width = filter_width;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.quantized_activation_min = output_activation_min;
params.quantized_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int filter_width, int filter_height,
                 int32 output_activation_min, int32 output_activation_max,
                 uint8* output_data, const Dims<4>& output_dims) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    // Without a fused activation the clamp must span the full uint8 range.
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
              pad_height, filter_width, filter_height, output_activation_min,
              output_activation_max, output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims) {
  // A single stride argument applies to both spatial dimensions.
  AveragePool<Ac>(input_data, input_dims, /*stride_width=*/stride,
                  /*stride_height=*/stride, pad_width, pad_height,
                  filter_width, filter_height, output_activation_min,
                  output_activation_max, output_data, output_dims);
}
inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = kheight;
params.filter_width = kwidth;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void MaxPool(const float* input_data, const Dims<4>& input_dims,
             int stride_width, int stride_height, int pad_width, int pad_height,
             int kwidth, int kheight, float* output_data,
             const Dims<4>& output_dims) {
  // Map the fused activation onto explicit clamp bounds and delegate.
  float act_min, act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  MaxPool(input_data, input_dims, stride_width, stride_height, pad_width,
          pad_height, kwidth, kheight, act_min, act_max, output_data,
          output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void MaxPool(const float* input_data, const Dims<4>& input_dims, int stride,
             int pad_width, int pad_height, int filter_width, int filter_height,
             float* output_data, const Dims<4>& output_dims) {
  // A single stride argument applies to both spatial dimensions.
  MaxPool<Ac>(input_data, input_dims, /*stride_width=*/stride,
              /*stride_height=*/stride, pad_width, pad_height, filter_width,
              filter_height, output_data, output_dims);
}
// Legacy Dims<4> uint8 MaxPool with explicit quantized activation bounds.
inline void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
                    int stride_width, int stride_height, int pad_width,
                    int pad_height, int filter_width, int filter_height,
                    int32 output_activation_min, int32 output_activation_max,
                    uint8* output_data, const Dims<4>& output_dims) {
  PoolParams params;
  params.filter_width = filter_width;
  params.filter_height = filter_height;
  params.stride_width = stride_width;
  params.stride_height = stride_height;
  params.padding_values.width = pad_width;
  params.padding_values.height = pad_height;
  params.quantized_activation_min = output_activation_min;
  params.quantized_activation_max = output_activation_max;
  MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
          output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
             int stride_width, int stride_height, int pad_width, int pad_height,
             int filter_width, int filter_height, int32 output_activation_min,
             int32 output_activation_max, uint8* output_data,
             const Dims<4>& output_dims) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    // Without a fused activation the clamp must span the full uint8 range.
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  MaxPool(input_data, input_dims, stride_width, stride_height, pad_width,
          pad_height, filter_width, filter_height, output_activation_min,
          output_activation_max, output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void MaxPool(const uint8* input_data, const Dims<4>& input_dims, int stride,
             int pad_width, int pad_height, int filter_width, int filter_height,
             int32 output_activation_min, int32 output_activation_max,
             uint8* output_data, const Dims<4>& output_dims) {
  // A single stride argument applies to both spatial dimensions.
  MaxPool<Ac>(input_data, input_dims, /*stride_width=*/stride,
              /*stride_height=*/stride, pad_width, pad_height, filter_width,
              filter_height, output_activation_min, output_activation_max,
              output_data, output_dims);
}
// Legacy Dims<4> float L2Pool with explicit activation bounds.
inline void L2Pool(const float* input_data, const Dims<4>& input_dims,
                   int stride_width, int stride_height, int pad_width,
                   int pad_height, int filter_width, int filter_height,
                   float output_activation_min, float output_activation_max,
                   float* output_data, const Dims<4>& output_dims) {
  PoolParams params;
  params.filter_width = filter_width;
  params.filter_height = filter_height;
  params.stride_width = stride_width;
  params.stride_height = stride_height;
  params.padding_values.width = pad_width;
  params.padding_values.height = pad_height;
  params.float_activation_min = output_activation_min;
  params.float_activation_max = output_activation_max;
  L2Pool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
         output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void L2Pool(const float* input_data, const Dims<4>& input_dims,
            int stride_width, int stride_height, int pad_width, int pad_height,
            int filter_width, int filter_height, float* output_data,
            const Dims<4>& output_dims) {
  // Map the fused activation onto explicit clamp bounds and delegate.
  float act_min, act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  L2Pool(input_data, input_dims, stride_width, stride_height, pad_width,
         pad_height, filter_width, filter_height, act_min, act_max,
         output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void L2Pool(const float* input_data, const Dims<4>& input_dims, int stride,
            int pad_width, int pad_height, int filter_width, int filter_height,
            float* output_data, const Dims<4>& output_dims) {
  // A single stride argument applies to both spatial dimensions.
  L2Pool<Ac>(input_data, input_dims, /*stride_width=*/stride,
             /*stride_height=*/stride, pad_width, pad_height, filter_width,
             filter_height, output_data, output_dims);
}
// Quantized uint8 softmax over the innermost dimension, computed entirely in
// 32-bit fixed-point arithmetic (NEON-accelerated when USE_NEON is defined).
// Per row it: (1) finds the max entry, (2) accumulates sum(exp(diff)) over
// entries whose difference from the max is >= diff_min, and (3) writes
// exp(diff) / sum as a uint8 in [0, 255]; entries below diff_min produce 0.
// params supplies the fixed-point scaling of the input differences
// (input_multiplier / input_left_shift) and the diff_min cutoff.
inline void Softmax(const SoftmaxParams& params,
                    const RuntimeShape& input_shape, const uint8* input_data,
                    const RuntimeShape& output_shape, uint8* output_data) {
  const int32 input_beta_multiplier = params.input_multiplier;
  const int32 input_beta_left_shift = params.input_left_shift;
  const int diff_min = params.diff_min;
  // The representation chosen for the input to the exp() function is Q5.26.
  // We need to leave extra space since values that we skip might be as large as
  // -32 before multiplying by input_beta_multiplier, and therefore as large as
  // -16 afterwards. Note that exp(-8) is definitely not insignificant to
  // accumulation, but exp(-16) definitely is.
  static const int kScaledDiffIntegerBits = 5;
  static const int kAccumulationIntegerBits = 12;
  using FixedPointScaledDiff =
      gemmlowp::FixedPoint<int32, kScaledDiffIntegerBits>;
  using FixedPointAccum = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
  using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
  ruy::profiler::ScopeLabel label("Softmax/8bit");
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  // Each of the outer_size rows of length `depth` is processed independently.
  for (int b = 0; b < outer_size; ++b) {
    const uint8* input_data_ptr = input_data + b * depth;
    uint8* output_data_ptr = output_data + b * depth;
    // Determine the largest entry in the current row
    uint8 max_in_row = 0;
    {
      int c = 0;
#ifdef USE_NEON
      // Wide NEON max reduction: 32 bytes per iteration, then 16-, 8-byte
      // tails, then a lane-wise reduction down to a single scalar max.
      uint8x16_t max16_0 = vdupq_n_u8(0);
      uint8x16_t max16_1 = vdupq_n_u8(0);
      for (; c <= depth - 32; c += 32) {
        max16_0 = vmaxq_u8(max16_0, vld1q_u8(input_data_ptr + c + 0));
        max16_1 = vmaxq_u8(max16_1, vld1q_u8(input_data_ptr + c + 16));
      }
      uint8x16_t max16 = vmaxq_u8(max16_0, max16_1);
      if (c <= depth - 16) {
        max16 = vmaxq_u8(max16, vld1q_u8(input_data_ptr + c));
        c += 16;
      }
      uint8x8_t max8 = vmax_u8(vget_low_u8(max16), vget_high_u8(max16));
      if (c <= depth - 8) {
        max8 = vmax_u8(max8, vld1_u8(input_data_ptr + c));
        c += 8;
      }
      uint8x8_t max4 = vmax_u8(max8, vext_u8(max8, max8, 4));
      uint8x8_t max2 = vmax_u8(max4, vext_u8(max4, max4, 2));
      uint8x8_t max1 = vpmax_u8(max2, max2);
      max_in_row = vget_lane_u8(max1, 0);
#endif
      // Scalar tail (and the whole loop when NEON is unavailable).
      for (; c < depth; ++c) {
        max_in_row = std::max(max_in_row, input_data_ptr[c]);
      }
    }
#ifdef USE_NEON
    using FixedPointAccumInt32x4 =
        gemmlowp::FixedPoint<int32x4_t, kAccumulationIntegerBits>;
    using FixedPointScaledDiffInt32x4 =
        gemmlowp::FixedPoint<int32x4_t, kScaledDiffIntegerBits>;
    using FixedPoint0Int32x4 = gemmlowp::FixedPoint<int32x4_t, 0>;
    FixedPoint0Int32x4 input_beta_multiplier_f0 =
        FixedPoint0Int32x4::FromScalarRaw(input_beta_multiplier);
    int16x8_t max_in_row_s16 = vdupq_n_s16(max_in_row);
#endif
    // Compute the sum of exponentials of the differences of entries in the
    // current row from the largest entry in the current row.
    FixedPointAccum sum_of_exps = FixedPointAccum::Zero();
    {
      int c = 0;
#ifdef USE_NEON
      // Vector pass: 8 entries per iteration, widened to two int32x4 halves.
      // Entries below diff_min are masked out of the accumulation.
      int32x4_t diff_min_s32 = vdupq_n_s32(diff_min);
      FixedPointAccumInt32x4 sum_of_exps_0 = FixedPointAccumInt32x4::Zero();
      FixedPointAccumInt32x4 sum_of_exps_1 = FixedPointAccumInt32x4::Zero();
      FixedPointAccumInt32x4 zeros = FixedPointAccumInt32x4::Zero();
      for (; c <= depth - 8; c += 8) {
        uint16x8_t input_u16 = vmovl_u8(vld1_u8(input_data_ptr + c));
        int16x8_t input_diff_s16 =
            vsubq_s16(vreinterpretq_s16_u16(input_u16), max_in_row_s16);
        int32x4_t input_diff_s32_0 = vmovl_s16(vget_low_s16(input_diff_s16));
        int32x4_t input_diff_s32_1 = vmovl_s16(vget_high_s16(input_diff_s16));
        int32x4_t mask_0 =
            gemmlowp::MaskIfGreaterThanOrEqual(input_diff_s32_0, diff_min_s32);
        int32x4_t mask_1 =
            gemmlowp::MaskIfGreaterThanOrEqual(input_diff_s32_1, diff_min_s32);
        FixedPointScaledDiffInt32x4 scaled_diff_0 =
            input_beta_multiplier_f0 *
            FixedPointScaledDiffInt32x4::FromRaw(
                gemmlowp::ShiftLeft(input_diff_s32_0, input_beta_left_shift));
        FixedPointScaledDiffInt32x4 scaled_diff_1 =
            input_beta_multiplier_f0 *
            FixedPointScaledDiffInt32x4::FromRaw(
                gemmlowp::ShiftLeft(input_diff_s32_1, input_beta_left_shift));
        FixedPointAccumInt32x4 exps_0 =
            gemmlowp::Rescale<kAccumulationIntegerBits>(
                exp_on_negative_values(scaled_diff_0));
        FixedPointAccumInt32x4 exps_1 =
            gemmlowp::Rescale<kAccumulationIntegerBits>(
                exp_on_negative_values(scaled_diff_1));
        FixedPointAccumInt32x4 masked_exps_0 =
            SelectUsingMask(mask_0, exps_0, zeros);
        FixedPointAccumInt32x4 masked_exps_1 =
            SelectUsingMask(mask_1, exps_1, zeros);
        sum_of_exps_0 = sum_of_exps_0 + masked_exps_0;
        sum_of_exps_1 = sum_of_exps_1 + masked_exps_1;
      }
      // Horizontal reduction of the two 4-lane accumulators into one scalar.
      int32x4_t sum_of_exps_reduced_4 = (sum_of_exps_0 + sum_of_exps_1).raw();
      int32x2_t sum_of_exps_reduced_2 =
          vadd_s32(vget_low_s32(sum_of_exps_reduced_4),
                   vget_high_s32(sum_of_exps_reduced_4));
      int32x2_t sum_of_exps_reduced_1 =
          vpadd_s32(sum_of_exps_reduced_2, sum_of_exps_reduced_2);
      sum_of_exps =
          FixedPointAccum::FromRaw(vget_lane_s32(sum_of_exps_reduced_1, 0));
#endif
      // Scalar tail (and the whole loop when NEON is unavailable).
      for (; c < depth; ++c) {
        int32 input_diff = static_cast<int32>(input_data_ptr[c]) - max_in_row;
        if (input_diff >= diff_min) {
          const int32 input_diff_rescaled =
              MultiplyByQuantizedMultiplierGreaterThanOne(
                  input_diff, input_beta_multiplier, input_beta_left_shift);
          const FixedPointScaledDiff scaled_diff_f8 =
              FixedPointScaledDiff::FromRaw(input_diff_rescaled);
          sum_of_exps =
              sum_of_exps + gemmlowp::Rescale<kAccumulationIntegerBits>(
                                exp_on_negative_values(scaled_diff_f8));
        }
      }
    }
    // Compute the fixed-point multiplier and shift that we need to apply to
    // perform a division by the above-computed sum-of-exponentials.
    int num_bits_over_unit = 0;
    FixedPoint0 shifted_scale = FixedPoint0::FromRaw(GetReciprocal(
        sum_of_exps.raw(), kAccumulationIntegerBits, &num_bits_over_unit));
    // Compute the quotients of exponentials of differences of entries in the
    // current row from the largest entry, over the previously-computed sum of
    // exponentials.
    {
      int c = 0;
#ifdef USE_NEON
      // Vector pass mirrors the accumulation loop above: recompute each
      // exponential, multiply by the reciprocal of the sum, narrow to uint8,
      // and zero out lanes whose diff was below diff_min.
      int16x8_t diff_min_s16 = vdupq_n_s16(diff_min);
      for (; c <= depth - 8; c += 8) {
        uint16x8_t input_u16 = vmovl_u8(vld1_u8(input_data_ptr + c));
        int16x8_t input_diff_s16 =
            vsubq_s16(vreinterpretq_s16_u16(input_u16), max_in_row_s16);
        int32x4_t input_diff_s32_0 = vmovl_s16(vget_low_s16(input_diff_s16));
        int32x4_t input_diff_s32_1 = vmovl_s16(vget_high_s16(input_diff_s16));
        uint8x8_t mask = vmovn_u16(vcgeq_s16(input_diff_s16, diff_min_s16));
        FixedPointScaledDiffInt32x4 scaled_diff_0 =
            input_beta_multiplier_f0 *
            FixedPointScaledDiffInt32x4::FromRaw(
                gemmlowp::ShiftLeft(input_diff_s32_0, input_beta_left_shift));
        FixedPointScaledDiffInt32x4 scaled_diff_1 =
            input_beta_multiplier_f0 *
            FixedPointScaledDiffInt32x4::FromRaw(
                gemmlowp::ShiftLeft(input_diff_s32_1, input_beta_left_shift));
        FixedPoint0Int32x4 exp_0 = exp_on_negative_values(scaled_diff_0);
        FixedPoint0Int32x4 exp_1 = exp_on_negative_values(scaled_diff_1);
        int32x4_t output_s32_0 = gemmlowp::RoundingDivideByPOT(
            vqrdmulhq_n_s32(exp_0.raw(), shifted_scale.raw()),
            num_bits_over_unit + 31 - 8);
        int32x4_t output_s32_1 = gemmlowp::RoundingDivideByPOT(
            vqrdmulhq_n_s32(exp_1.raw(), shifted_scale.raw()),
            num_bits_over_unit + 31 - 8);
        int16x8_t output_s16 =
            vcombine_s16(vqmovn_s32(output_s32_0), vqmovn_s32(output_s32_1));
        uint8x8_t output_u8 = vqmovun_s16(output_s16);
        uint8x8_t masked_output = vbsl_u8(mask, output_u8, vdup_n_u8(0));
        vst1_u8(output_data_ptr + c, masked_output);
      }
#endif
      // Scalar tail (and the whole loop when NEON is unavailable).
      for (; c < depth; ++c) {
        int32 input_diff = static_cast<int32>(input_data_ptr[c]) - max_in_row;
        if (input_diff >= diff_min) {
          const int32 input_diff_rescaled =
              MultiplyByQuantizedMultiplierGreaterThanOne(
                  input_diff, input_beta_multiplier, input_beta_left_shift);
          const FixedPointScaledDiff scaled_diff_f8 =
              FixedPointScaledDiff::FromRaw(input_diff_rescaled);
          FixedPoint0 exp_in_0 = exp_on_negative_values(scaled_diff_f8);
          int32 unsat_output = gemmlowp::RoundingDivideByPOT(
              (shifted_scale * exp_in_0).raw(), num_bits_over_unit + 31 - 8);
          // Saturate to the uint8 output range before storing.
          output_data_ptr[c] = std::max(std::min(unsat_output, 255), 0);
        } else {
          // Entries below the diff_min cutoff contribute nothing.
          output_data_ptr[c] = 0;
        }
      }
    }
  }
}
inline void Softmax(const float* input_data, const RuntimeShape& input_shape,
float beta, float* output_data,
const RuntimeShape& output_shape) {
SoftmaxParams params;
params.beta = beta;
Softmax(params, input_shape, input_data, output_shape, output_data);
}
inline void Softmax(const float* input_data, const Dims<4>& input_dims,
float beta, float* output_data,
const Dims<4>& output_dims) {
Softmax(input_data, DimsToShape(input_dims), beta, output_data,
DimsToShape(output_dims));
}
inline void Softmax(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_beta_multiplier, int32 input_beta_left_shift,
int diff_min, uint8* output_data,
const RuntimeShape& output_shape) {
SoftmaxParams params;
params.input_multiplier = input_beta_multiplier;
params.input_left_shift = input_beta_left_shift;
params.diff_min = diff_min;
Softmax(params, input_shape, input_data, output_shape, output_data);
}
inline void Softmax(const uint8* input_data, const Dims<4>& input_dims,
int32 input_beta_multiplier, int32 input_beta_left_shift,
int diff_min, uint8* output_data,
const Dims<4>& output_dims) {
Softmax(input_data, DimsToShape(input_dims), input_beta_multiplier,
input_beta_left_shift, diff_min, output_data,
DimsToShape(output_dims));
}
inline void LogSoftmax(const float* input_data, const RuntimeShape& input_shape,
float* output_data, const RuntimeShape& output_shape) {
SoftmaxParams params;
// No params currently used for float LogSoftmax.
LogSoftmax(params, input_shape, input_data, output_shape, output_data);
}
inline void LogSoftmax(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
LogSoftmax(input_data, DimsToShape(input_dims), output_data,
DimsToShape(output_dims));
}
inline void LogSoftmax(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_multiplier, int32 input_left_shift,
int32 reverse_scaling_divisor,
int32 reverse_scaling_right_shift, int diff_min,
uint8* output_data, const RuntimeShape& output_shape) {
SoftmaxParams params;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
params.reverse_scaling_divisor = reverse_scaling_divisor;
params.reverse_scaling_right_shift = reverse_scaling_right_shift;
params.diff_min = diff_min;
reference_ops::LogSoftmax(params, input_shape, input_data, output_shape,
output_data);
}
inline void LogSoftmax(const uint8* input_data, const Dims<4>& input_dims,
int32 input_multiplier, int32 input_left_shift,
int32 reverse_scaling_divisor,
int32 reverse_scaling_right_shift, int diff_min,
uint8* output_data, const Dims<4>& output_dims) {
reference_ops::LogSoftmax(
input_data, DimsToShape(input_dims), input_multiplier, input_left_shift,
reverse_scaling_divisor, reverse_scaling_right_shift, diff_min,
output_data, DimsToShape(output_dims));
}
// Quantized uint8 logistic (sigmoid) activation with an optional NEON fast
// path.
//
// Each input byte is centered by subtracting params.input_zero_point.
// Centered values below -input_range_radius map to 0 and values above
// input_range_radius map to 255.  In-range values are rescaled with
// (input_multiplier, input_left_shift), evaluated via gemmlowp::logistic in
// fixed point, and the result is rounded from Q0.31 down to 8 bits.
// NOTE(review): this assumes the standard sigmoid output quantization
// (output covers [0, 1] across the uint8 range) -- confirm with callers.
inline void Logistic(const LogisticParams& params,
                     const RuntimeShape& input_shape, const uint8* input_data,
                     const RuntimeShape& output_shape, uint8* output_data) {
  ruy::profiler::ScopeLabel label("Logistic/Uint8");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int32 input_multiplier = params.input_multiplier;
  const int input_left_shift = params.input_left_shift;
  // Input and output must hold the same number of elements.
  const int size = MatchingFlatSize(input_shape, output_shape);
  int c = 0;
#ifdef USE_NEON
  // Handle 16 values at a time
  for (; c <= size - 16; c += 16) {
    // Read input uint8 values, cast to int16 and subtract input_zero_point
    uint8x16_t input_val_u8 = vld1q_u8(input_data + c);
    int16x8_t input_val_centered_0 =
        vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(input_val_u8))),
                  vdupq_n_s16(input_zero_point));
    int16x8_t input_val_centered_1 =
        vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(input_val_u8))),
                  vdupq_n_s16(input_zero_point));
    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = 0;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 255;
    //   } else {
    //     ...
    uint16x8_t mask_rightclamp_0 =
        vcgtq_s16(input_val_centered_0, vdupq_n_s16(input_range_radius));
    uint16x8_t mask_rightclamp_1 =
        vcgtq_s16(input_val_centered_1, vdupq_n_s16(input_range_radius));
    uint16x8_t mask_leftclamp_0 =
        vcgeq_s16(input_val_centered_0, vdupq_n_s16(-input_range_radius));
    uint16x8_t mask_leftclamp_1 =
        vcgeq_s16(input_val_centered_1, vdupq_n_s16(-input_range_radius));
    // Narrow each all-zeros/all-ones 16-bit lane mask to an 8-bit lane mask.
    uint8x16_t mask_rightclamp = vcombine_u8(vshrn_n_u16(mask_rightclamp_0, 8),
                                             vshrn_n_u16(mask_rightclamp_1, 8));
    uint8x16_t mask_leftclamp = vcombine_u8(vshrn_n_u16(mask_leftclamp_0, 8),
                                            vshrn_n_u16(mask_leftclamp_1, 8));
    // This performs what is expressed in the scalar code as
    //   const int32 input_val_rescaled =
    //       MultiplyByQuantizedMultiplierGreaterThanOne(
    //           input_val_centered, input_multiplier, input_left_shift);
    int32x4_t input_val_rescaled_0 =
        vshlq_s32(vmovl_s16(vget_low_s16(input_val_centered_0)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_1 =
        vshlq_s32(vmovl_s16(vget_high_s16(input_val_centered_0)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_2 =
        vshlq_s32(vmovl_s16(vget_low_s16(input_val_centered_1)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_3 =
        vshlq_s32(vmovl_s16(vget_high_s16(input_val_centered_1)),
                  vdupq_n_s32(input_left_shift));
    input_val_rescaled_0 =
        vqrdmulhq_n_s32(input_val_rescaled_0, input_multiplier);
    input_val_rescaled_1 =
        vqrdmulhq_n_s32(input_val_rescaled_1, input_multiplier);
    input_val_rescaled_2 =
        vqrdmulhq_n_s32(input_val_rescaled_2, input_multiplier);
    input_val_rescaled_3 =
        vqrdmulhq_n_s32(input_val_rescaled_3, input_multiplier);
    // Invoke gemmlowp::logistic on FixedPoint wrapping int32x4_t
    using FixedPoint4 = gemmlowp::FixedPoint<int32x4_t, 4>;
    using FixedPoint0 = gemmlowp::FixedPoint<int32x4_t, 0>;
    const FixedPoint4 input_val_f4_0 =
        FixedPoint4::FromRaw(input_val_rescaled_0);
    const FixedPoint4 input_val_f4_1 =
        FixedPoint4::FromRaw(input_val_rescaled_1);
    const FixedPoint4 input_val_f4_2 =
        FixedPoint4::FromRaw(input_val_rescaled_2);
    const FixedPoint4 input_val_f4_3 =
        FixedPoint4::FromRaw(input_val_rescaled_3);
    const FixedPoint0 output_val_f0_0 = gemmlowp::logistic(input_val_f4_0);
    const FixedPoint0 output_val_f0_1 = gemmlowp::logistic(input_val_f4_1);
    const FixedPoint0 output_val_f0_2 = gemmlowp::logistic(input_val_f4_2);
    const FixedPoint0 output_val_f0_3 = gemmlowp::logistic(input_val_f4_3);
    // Divide by 2^23 as in the scalar code
    using gemmlowp::RoundingDivideByPOT;
    int32x4_t output_val_s32_0 = RoundingDivideByPOT(output_val_f0_0.raw(), 23);
    int32x4_t output_val_s32_1 = RoundingDivideByPOT(output_val_f0_1.raw(), 23);
    int32x4_t output_val_s32_2 = RoundingDivideByPOT(output_val_f0_2.raw(), 23);
    int32x4_t output_val_s32_3 = RoundingDivideByPOT(output_val_f0_3.raw(), 23);
    // Cast output values to uint8, saturating
    int16x8_t output_val_s16_0 = vcombine_s16(vqmovn_s32(output_val_s32_0),
                                              vqmovn_s32(output_val_s32_1));
    int16x8_t output_val_s16_1 = vcombine_s16(vqmovn_s32(output_val_s32_2),
                                              vqmovn_s32(output_val_s32_3));
    uint8x16_t output_val_u8 = vcombine_u8(vqmovun_s16(output_val_s16_0),
                                           vqmovun_s16(output_val_s16_1));
    // Perform the bit-masking with the bit masks computed at the beginning,
    // see the comment there.  OR forces right-clamped lanes to 255; AND
    // forces left-clamped lanes to 0.
    output_val_u8 = vorrq_u8(output_val_u8, mask_rightclamp);
    output_val_u8 = vandq_u8(output_val_u8, mask_leftclamp);
    // Store back to memory
    vst1q_u8(output_data + c, output_val_u8);
  }
#endif
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const uint8 input_val_u8 = input_data[c];
    const int32 input_val_centered =
        static_cast<int32>(input_val_u8) - input_zero_point;
    uint8 output_val;
    if (input_val_centered < -input_range_radius) {
      output_val = 0;
    } else if (input_val_centered > input_range_radius) {
      output_val = 255;
    } else {
      const int32 input_val_rescaled =
          MultiplyByQuantizedMultiplierGreaterThanOne(
              input_val_centered, input_multiplier, input_left_shift);
      using FixedPoint4 = gemmlowp::FixedPoint<int32, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::logistic(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Round the Q0.31 sigmoid result down to 8 bits (31 - 8 = 23).
      int32 output_val_s32 = RoundingDivideByPOT(output_val_f0.raw(), 23);
      // A saturated sigmoid of exactly 1.0 rounds to 256; clamp to the
      // largest representable uint8.
      if (output_val_s32 == 256) {
        output_val_s32 = 255;
      }
      TFLITE_DCHECK_GE(output_val_s32, 0);
      TFLITE_DCHECK_LE(output_val_s32, 255);
      output_val = static_cast<uint8>(output_val_s32);
    }
    output_data[c] = output_val;
  }
}
inline void Logistic(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const RuntimeShape& output_shape) {
LogisticParams params;
params.input_zero_point = input_zero_point;
params.input_range_radius = input_range_radius;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
Logistic(params, input_shape, input_data, output_shape, output_data);
}
inline void Logistic(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Logistic(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
inline void Logistic(const uint8* input_data, const Dims<4>& input_dims,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const Dims<4>& output_dims) {
Logistic(input_data, DimsToShape(input_dims), input_zero_point,
input_range_radius, input_multiplier, input_left_shift, output_data,
DimsToShape(output_dims));
}
inline void Logistic(const RuntimeShape& input_shape, const int16* input_data,
const RuntimeShape& output_shape, int16* output_data) {
LogisticParams params;
// No params currently needed by int16 Logistic.
Logistic(params, input_shape, input_data, output_shape, output_data);
}
inline void Logistic(const int16* input_data, const RuntimeShape& input_shape,
int16* output_data, const RuntimeShape& output_shape) {
LogisticParams params;
// No params currently needed by int16 Logistic.
Logistic(params, input_shape, input_data, output_shape, output_data);
}
inline void Logistic(const int16* input_data, const Dims<4>& input_dims,
int16* output_data, const Dims<4>& output_dims) {
Logistic(input_data, DimsToShape(input_dims), output_data,
DimsToShape(output_dims));
}
inline void Tanh(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Tanh(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// Quantized uint8 tanh activation with an optional NEON fast path.
//
// Mirrors the uint8 Logistic kernel above: inputs are centered by
// params.input_zero_point, clamped to [-input_range_radius,
// input_range_radius] (out-of-range lanes become 0 / 255), rescaled with
// (input_multiplier, input_left_shift), evaluated via gemmlowp::tanh in
// fixed point, rounded from Q0.31 down to 8 bits, and shifted by the
// output zero point.  The output zero point is hard-coded to 128 here,
// i.e. tanh's symmetric [-1, 1] range is mapped around the uint8 midpoint.
inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape,
                 const uint8* input_data, const RuntimeShape& output_shape,
                 uint8* output_data) {
  // Note that this is almost the exact same code as in Logistic().
  ruy::profiler::ScopeLabel label("Tanh");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int32 input_multiplier = params.input_multiplier;
  const int input_left_shift = params.input_left_shift;
  // Input and output must hold the same number of elements.
  const int size = MatchingFlatSize(input_shape, output_shape);
  int c = 0;
  // Fixed output zero point: centers tanh's symmetric output on 128.
  int32_t output_zero_point = 128;
#ifdef USE_NEON
  // Handle 16 values at a time
  for (; c <= size - 16; c += 16) {
    // Read input uint8 values, cast to int16 and subtract input_zero_point
    uint8x16_t input_val_u8 = vld1q_u8(input_data + c);
    int16x8_t input_val_centered_0 =
        vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(input_val_u8))),
                  vdupq_n_s16(input_zero_point));
    int16x8_t input_val_centered_1 =
        vsubq_s16(vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(input_val_u8))),
                  vdupq_n_s16(input_zero_point));
    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = 0;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 255;
    //   } else {
    //     ...
    uint16x8_t mask_rightclamp_0 =
        vcgtq_s16(input_val_centered_0, vdupq_n_s16(input_range_radius));
    uint16x8_t mask_rightclamp_1 =
        vcgtq_s16(input_val_centered_1, vdupq_n_s16(input_range_radius));
    uint16x8_t mask_leftclamp_0 =
        vcgeq_s16(input_val_centered_0, vdupq_n_s16(-input_range_radius));
    uint16x8_t mask_leftclamp_1 =
        vcgeq_s16(input_val_centered_1, vdupq_n_s16(-input_range_radius));
    // Narrow each all-zeros/all-ones 16-bit lane mask to an 8-bit lane mask.
    uint8x16_t mask_rightclamp = vcombine_u8(vshrn_n_u16(mask_rightclamp_0, 8),
                                             vshrn_n_u16(mask_rightclamp_1, 8));
    uint8x16_t mask_leftclamp = vcombine_u8(vshrn_n_u16(mask_leftclamp_0, 8),
                                            vshrn_n_u16(mask_leftclamp_1, 8));
    // This performs what is expressed in the scalar code as
    //   const int32 input_val_rescaled =
    //       MultiplyByQuantizedMultiplierGreaterThanOne(
    //           input_val_centered, input_multiplier, input_left_shift);
    int32x4_t input_val_rescaled_0 =
        vshlq_s32(vmovl_s16(vget_low_s16(input_val_centered_0)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_1 =
        vshlq_s32(vmovl_s16(vget_high_s16(input_val_centered_0)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_2 =
        vshlq_s32(vmovl_s16(vget_low_s16(input_val_centered_1)),
                  vdupq_n_s32(input_left_shift));
    int32x4_t input_val_rescaled_3 =
        vshlq_s32(vmovl_s16(vget_high_s16(input_val_centered_1)),
                  vdupq_n_s32(input_left_shift));
    input_val_rescaled_0 =
        vqrdmulhq_n_s32(input_val_rescaled_0, input_multiplier);
    input_val_rescaled_1 =
        vqrdmulhq_n_s32(input_val_rescaled_1, input_multiplier);
    input_val_rescaled_2 =
        vqrdmulhq_n_s32(input_val_rescaled_2, input_multiplier);
    input_val_rescaled_3 =
        vqrdmulhq_n_s32(input_val_rescaled_3, input_multiplier);
    // Invoke gemmlowp::tanh on FixedPoint wrapping int32x4_t
    using FixedPoint4 = gemmlowp::FixedPoint<int32x4_t, 4>;
    using FixedPoint0 = gemmlowp::FixedPoint<int32x4_t, 0>;
    const FixedPoint4 input_val_f4_0 =
        FixedPoint4::FromRaw(input_val_rescaled_0);
    const FixedPoint4 input_val_f4_1 =
        FixedPoint4::FromRaw(input_val_rescaled_1);
    const FixedPoint4 input_val_f4_2 =
        FixedPoint4::FromRaw(input_val_rescaled_2);
    const FixedPoint4 input_val_f4_3 =
        FixedPoint4::FromRaw(input_val_rescaled_3);
    const FixedPoint0 output_val_f0_0 = gemmlowp::tanh(input_val_f4_0);
    const FixedPoint0 output_val_f0_1 = gemmlowp::tanh(input_val_f4_1);
    const FixedPoint0 output_val_f0_2 = gemmlowp::tanh(input_val_f4_2);
    const FixedPoint0 output_val_f0_3 = gemmlowp::tanh(input_val_f4_3);
    // Divide by 2^24 as in the scalar code
    using gemmlowp::RoundingDivideByPOT;
    int32x4_t output_val_s32_0 = RoundingDivideByPOT(output_val_f0_0.raw(), 24);
    int32x4_t output_val_s32_1 = RoundingDivideByPOT(output_val_f0_1.raw(), 24);
    int32x4_t output_val_s32_2 = RoundingDivideByPOT(output_val_f0_2.raw(), 24);
    int32x4_t output_val_s32_3 = RoundingDivideByPOT(output_val_f0_3.raw(), 24);
    // Add the output zero point
    int32x4_t output_zero_point_s32 = vdupq_n_s32(output_zero_point);
    output_val_s32_0 = vaddq_s32(output_val_s32_0, output_zero_point_s32);
    output_val_s32_1 = vaddq_s32(output_val_s32_1, output_zero_point_s32);
    output_val_s32_2 = vaddq_s32(output_val_s32_2, output_zero_point_s32);
    output_val_s32_3 = vaddq_s32(output_val_s32_3, output_zero_point_s32);
    // Cast output values to uint8, saturating
    int16x8_t output_val_s16_0 = vcombine_s16(vqmovn_s32(output_val_s32_0),
                                              vqmovn_s32(output_val_s32_1));
    int16x8_t output_val_s16_1 = vcombine_s16(vqmovn_s32(output_val_s32_2),
                                              vqmovn_s32(output_val_s32_3));
    uint8x16_t output_val_u8 = vcombine_u8(vqmovun_s16(output_val_s16_0),
                                           vqmovun_s16(output_val_s16_1));
    // Perform the bit-masking with the bit masks computed at the beginning,
    // see the comment there.  OR forces right-clamped lanes to 255; AND
    // forces left-clamped lanes to 0.
    output_val_u8 = vorrq_u8(output_val_u8, mask_rightclamp);
    output_val_u8 = vandq_u8(output_val_u8, mask_leftclamp);
    // Store back to memory
    vst1q_u8(output_data + c, output_val_u8);
  }
#endif
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const uint8 input_val_u8 = input_data[c];
    const int32 input_val_centered =
        static_cast<int32>(input_val_u8) - input_zero_point;
    uint8 output_val;
    if (input_val_centered < -input_range_radius) {
      output_val = 0;
    } else if (input_val_centered > input_range_radius) {
      output_val = 255;
    } else {
      const int32 input_val_rescaled =
          MultiplyByQuantizedMultiplierGreaterThanOne(
              input_val_centered, input_multiplier, input_left_shift);
      using FixedPoint4 = gemmlowp::FixedPoint<int32, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Round the Q0.31 tanh result down to 8 bits (but by 24 rather than
      // 23, since tanh spans [-1, 1] rather than [0, 1]).
      int32 output_val_s32 = RoundingDivideByPOT(output_val_f0.raw(), 24);
      output_val_s32 += output_zero_point;
      // A saturated tanh of exactly 1.0 rounds to 256 after the zero-point
      // shift; clamp to the largest representable uint8.
      if (output_val_s32 == 256) {
        output_val_s32 = 255;
      }
      TFLITE_DCHECK_GE(output_val_s32, 0);
      TFLITE_DCHECK_LE(output_val_s32, 255);
      output_val = static_cast<uint8>(output_val_s32);
    }
    output_data[c] = output_val;
  }
}
inline void Tanh(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const RuntimeShape& output_shape) {
TanhParams params;
params.input_zero_point = input_zero_point;
params.input_range_radius = input_range_radius;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
Tanh(params, input_shape, input_data, output_shape, output_data);
}
inline void Tanh(const uint8* input_data, const Dims<4>& input_dims,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const Dims<4>& output_dims) {
Tanh(input_data, DimsToShape(input_dims), input_zero_point,
input_range_radius, input_multiplier, input_left_shift, output_data,
DimsToShape(output_dims));
}
inline void Tanh(const int16* input_data, const RuntimeShape& input_shape,
int input_left_shift, int16* output_data,
const RuntimeShape& output_shape) {
TanhParams params;
params.input_left_shift = input_left_shift;
Tanh(params, input_shape, input_data, output_shape, output_data);
}
inline void Tanh(const int16* input_data, const Dims<4>& input_dims,
int input_left_shift, int16* output_data,
const Dims<4>& output_dims) {
Tanh(input_data, DimsToShape(input_dims), input_left_shift, output_data,
DimsToShape(output_dims));
}
template <typename T>
inline void DepthToSpace(const T* input_data, const Dims<4>& input_dims,
int block_size, T* output_data,
const Dims<4>& output_dims) {
tflite::DepthToSpaceParams op_params;
op_params.block_size = block_size;
DepthToSpace(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void SpaceToDepth(const T* input_data, const Dims<4>& input_dims,
int block_size, T* output_data,
const Dims<4>& output_dims) {
tflite::SpaceToDepthParams op_params;
op_params.block_size = block_size;
SpaceToDepth(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
inline void Mul(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
// Legacy entry point: derives the activation bounds from the fused
// activation template parameter, then forwards to the non-templated Mul.
template <FusedActivationFunctionType Ac>
void Mul(const float* input1_data, const Dims<4>& input1_dims,
         const float* input2_data, const Dims<4>& input2_dims,
         float* output_data, const Dims<4>& output_dims) {
  float activation_min;
  float activation_max;
  GetActivationMinMax(Ac, &activation_min, &activation_max);
  Mul(input1_data, input1_dims, input2_data, input2_dims, activation_min,
      activation_max, output_data, output_dims);
}
inline void Mul(const int32* input1_data, const Dims<4>& input1_dims,
const int32* input2_data, const Dims<4>& input2_dims,
int32 output_activation_min, int32 output_activation_max,
int32* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac>
void Mul(const int32* input1_data, const Dims<4>& input1_dims,
const int32* input2_data, const Dims<4>& input2_dims,
int32* output_data, const Dims<4>& output_dims) {
TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
tflite::ArithmeticParams op_params;
// No parameters needed.
MulNoActivation(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
inline void Mul(const int16* input1_data, const Dims<4>& input1_dims,
const int16* input2_data, const Dims<4>& input2_dims,
int16* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
// No parameters needed.
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void Mul(const int16* input1_data, const Dims<4>& input1_dims,
const int16* input2_data, const Dims<4>& input2_dims,
int32 output_offset, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.output_offset = output_offset;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void BroadcastMul(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// For compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
inline void BroadcastMul(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
float float_activation_min;
float float_activation_max;
GetActivationMinMax(Ac, &float_activation_min, &float_activation_max);
SetActivationParams(float_activation_min, float_activation_max, &op_params);
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
inline void LocalResponseNormalization(const float* input_data,
const Dims<4>& input_dims, int range,
float bias, float alpha, float beta,
float* output_data,
const Dims<4>& output_dims) {
tflite::LocalResponseNormalizationParams op_params;
op_params.range = range;
op_params.bias = bias;
op_params.alpha = alpha;
op_params.beta = beta;
LocalResponseNormalization(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename SrcT, typename DstT>
void Cast(const SrcT* input_data, const Dims<4>& input_dims, DstT* output_data,
const Dims<4>& output_dims) {
Cast(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
inline void Floor(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Floor(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
inline void ResizeBilinear(const float* input_data, const Dims<4>& input_dims,
const int32* output_size_data,
const Dims<4>& output_size_dims, float* output_data,
const Dims<4>& output_dims, bool align_corners) {
tflite::ResizeBilinearParams op_params;
op_params.align_corners = align_corners;
op_params.half_pixel_centers = false;
ResizeBilinear(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_size_dims), output_size_data,
DimsToShape(output_dims), output_data);
}
inline void ResizeBilinear(const uint8* input_data, const Dims<4>& input_dims,
const int32* output_size_data,
const Dims<4>& output_size_dims, uint8* output_data,
const Dims<4>& output_dims, bool align_corners) {
tflite::ResizeBilinearParams op_params;
op_params.align_corners = align_corners;
op_params.half_pixel_centers = false;
ResizeBilinear(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_size_dims), output_size_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code; defaults
// align_corners to false.
inline void ResizeBilinear(const float* input_data, const Dims<4>& input_dims,
                           const int32* output_size_data,
                           const Dims<4>& output_size_dims, float* output_data,
                           const Dims<4>& output_dims) {
  constexpr bool kAlignCorners = false;
  ResizeBilinear(input_data, input_dims, output_size_data, output_size_dims,
                 output_data, output_dims, kAlignCorners);
}
// legacy, for compatibility with old checked-in code; defaults
// align_corners to false.
inline void ResizeBilinear(const uint8* input_data, const Dims<4>& input_dims,
                           const int32* output_size_data,
                           const Dims<4>& output_size_dims, uint8* output_data,
                           const Dims<4>& output_dims) {
  constexpr bool kAlignCorners = false;
  ResizeBilinear(input_data, input_dims, output_size_data, output_size_dims,
                 output_data, output_dims, kAlignCorners);
}
template <typename T>
inline void BatchToSpaceND(const T* input_data, const Dims<4>& input_dims,
const int32* block_shape_data,
const Dims<4>& block_shape_dims,
const int32* crops_data, const Dims<4>& crops_dims,
T* output_data, const Dims<4>& output_dims) {
BatchToSpaceND(DimsToShape(input_dims), input_data,
DimsToShape(block_shape_dims), block_shape_data,
DimsToShape(crops_dims), crops_data, DimsToShape(output_dims),
output_data);
}
// Legacy signature, function covered both Pad and PadV2.
template <typename T>
inline void PadV2(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& left_paddings,
const std::vector<int>& right_paddings, T* output_data,
const Dims<4>& output_dims, const T pad_value) {
TFLITE_DCHECK_EQ(left_paddings.size(), 4);
TFLITE_DCHECK_EQ(right_paddings.size(), 4);
tflite::PadParams op_params;
op_params.left_padding_count = 4;
op_params.right_padding_count = 4;
for (int i = 0; i < 4; ++i) {
op_params.left_padding[i] = left_paddings[3 - i];
op_params.right_padding[i] = right_paddings[3 - i];
}
const T pad_value_copy = pad_value;
Pad(op_params, DimsToShape(input_dims), input_data, &pad_value_copy,
DimsToShape(output_dims), output_data);
}
// Old Pad that calls legacy PadV2, casting the int32 pad value to T.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, static_cast<T>(pad_value));
}
// Old Pad that only padded with 0.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims) {
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, static_cast<T>(0));
}
template <typename T>
inline void Slice(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& begin, const std::vector<int>& size,
T* output_data, const Dims<4>& output_dims) {
tflite::SliceParams op_params;
op_params.begin_count = 4;
op_params.size_count = 4;
for (int i = 0; i < 4; ++i) {
op_params.begin[i] = begin[3 - i];
op_params.size[i] = size[3 - i];
}
Slice(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
void TensorFlowMinimum(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, T* output_data,
const Dims<4>& output_dims) {
Minimum(DimsToShape(input1_dims), input1_data, input2_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
void TensorFlowMaximum(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, T* output_data,
const Dims<4>& output_dims) {
Maximum(DimsToShape(input1_dims), input1_data, input2_data,
DimsToShape(output_dims), output_data);
}
inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
int32 zero_point, double scale, float* output_data,
const Dims<4>& output_dims) {
tflite::DequantizationParams op_params;
op_params.zero_point = zero_point;
op_params.scale = scale;
Dequantize(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
void Transpose(const T* input, const Dims<4>& input_dims, T* output,
const Dims<4>& output_dims, const int* permuted_axes) {
TransposeParams params;
params.perm_count = 4;
for (int i = 0; i < 4; ++i) {
params.perm[i] = 3 - permuted_axes[3 - i];
}
Transpose(params, DimsToShape(input_dims), input, DimsToShape(output_dims),
output);
}
// Legacy entry point: builds StridedSliceParams from the masks and index
// vectors, reverses the indices to match legacy Dims ordering, and
// forwards to the params-based StridedSlice.
template <typename T>
inline void StridedSlice(const T* input_data, const Dims<4>& input_dims,
                         int begin_mask, int end_mask, int shrink_axis_mask,
                         const std::vector<int>& start_indices,
                         const std::vector<int>& stop_indices,
                         const std::vector<int>& strides, T* output_data,
                         const Dims<4>& output_dims) {
  TFLITE_DCHECK_EQ(start_indices.size(), 4);
  auto params = strided_slice::BuildStridedSliceParams(
      begin_mask, end_mask, shrink_axis_mask, start_indices, stop_indices,
      strides);
  reference_ops::StridedSliceReverseIndices(&params);
  StridedSlice(params, DimsToShape(input_dims), input_data,
               DimsToShape(output_dims), output_data);
}
// Legacy ArgMax.  Assumes a 4-D input reduced over the innermost dimension
// (the only case the legacy path supported, hence axis[0] == 3), so the
// output is effectively 3-D: output_dims.sizes[3] is always 1.
template <typename T1, typename T2, typename T3>
void ArgMax(const T3* axis, const T1* input_data,
            const tflite::Dims<4>& input_dims, T2* output_data,
            const tflite::Dims<4>& output_dims) {
  // Build the 3-D output shape directly, dropping the reduced dimension
  // (Dims<4> stores sizes innermost-first, so index 3 is outermost).
  auto output_shape = RuntimeShape(
      {output_dims.sizes[2], output_dims.sizes[1], output_dims.sizes[0]});
  TFLITE_DCHECK_EQ(output_shape.FlatSize(),
                   DimsToShape(output_dims).FlatSize());
  // Legacy path only supported this.
  TFLITE_DCHECK_EQ(axis[0], 3);
  ArgMinMax(DimsToShape(input_dims), input_data, axis, output_shape,
            output_data, /*is_arg_max=*/true);
}
template <typename T1, typename T2, typename T3>
void ArgMinMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
T2* output_data, const Dims<4>& output_dims,
const bool is_arg_max) {
ArgMinMax(axis, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data, is_arg_max);
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_LEGACY_OPTIMIZED_OPS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/legacy_optimized_ops.h | C++ | apache-2.0 | 244,094 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_MULTITHREADED_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_MULTITHREADED_CONV_H_
#include <assert.h>
#include <stdint.h>
#include <sys/types.h>
#include <algorithm>
#include <cmath>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include "tensorflow/lite/c/builtin_op_data.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/optimized/eigen_spatial_convolutions.h"
#include "tensorflow/lite/kernels/internal/optimized/optimized_ops.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace multithreaded_ops {
// Shorthands for the types we need when interfacing with the EigenTensor
// library.
typedef Eigen::TensorMap<
Eigen::Tensor<float, 2, Eigen::RowMajor, Eigen::DenseIndex>, Eigen::Aligned>
EigenMatrix;
typedef Eigen::TensorMap<
Eigen::Tensor<const float, 2, Eigen::RowMajor, Eigen::DenseIndex>,
Eigen::Aligned>
ConstEigenMatrix;
typedef Eigen::TensorMap<
Eigen::Tensor<float, 4, Eigen::RowMajor, Eigen::DenseIndex>, Eigen::Aligned>
EigenTensor;
typedef Eigen::TensorMap<
Eigen::Tensor<const float, 4, Eigen::RowMajor, Eigen::DenseIndex>,
Eigen::Aligned>
ConstEigenTensor;
// Utility functions we need for the EigenTensor API.
// Functor evaluating a plain matrix product on an Eigen device:
// out = lhs * rhs, with the contraction axes given by `contract_dims`.
template <typename Device, typename T>
struct MatMulConvFunctor {
  void operator()(
      const Device& device, EigenMatrix out, ConstEigenMatrix lhs,
      ConstEigenMatrix rhs,
      const Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1>&
          contract_dims) {
    // Eigen's contract() with a single (1, 0) index pair is standard
    // row-by-column matrix multiplication.
    out.device(device) = lhs.contract(rhs, contract_dims);
  }
};
// Performs a 2D convolution via Eigen. Input/output tensors are NHWC and the
// filter is laid out (height, width, input_depth, output_depth), as shown by
// the EigenTensor maps in the general branch below. Two special cases are
// lowered to a plain matrix multiplication; everything else goes through
// Eigen::SpatialConvolution.
template <class T>
class EigenTensorConvFunctor {
 private:
  // Maps the runtime PaddingType onto Eigen's enum. kNone is not a valid
  // input here.
  Eigen::PaddingType RuntimePadding2EigenPadding(PaddingType padding) {
    switch (padding) {
      case PaddingType::kValid:
        return Eigen::PADDING_VALID;
      case PaddingType::kSame:
        return Eigen::PADDING_SAME;
      case PaddingType::kNone:
        assert(false);  // should never get here.
        return Eigen::PADDING_VALID;
    }
    return Eigen::PADDING_SAME;  // Prevent compiler warning about missing
                                 // return
  }

 public:
  // Runs the convolution. Note: pad_width/pad_height are only consulted to
  // detect the zero-padding fast path; the actual padding amounts for the
  // general case are recomputed by Eigen from `padding`.
  void operator()(const Eigen::ThreadPoolDevice& device, const T* input_data,
                  int input_batches, int input_height, int input_width,
                  int input_depth, const T* filter_data, int filter_height,
                  int filter_width, int filter_count, int stride_rows,
                  int stride_cols, int pad_width, int pad_height,
                  PaddingType padding, T* output_data, int output_height,
                  int output_width) {
    const bool is_1x1_kernel = (filter_height == 1 && filter_width == 1 &&
                                stride_rows == 1 && stride_cols == 1);
    if (is_1x1_kernel) {
      // For 1x1 kernel, the 2D convolution is reduced to matrix
      // multiplication.
      const int conv_width = output_height * output_width;
      Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
      dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
      EigenMatrix output(output_data, input_batches * conv_width, filter_count);
      ConstEigenMatrix input(input_data, input_batches * conv_width,
                             input_depth);
      ConstEigenMatrix filter(filter_data, input_depth, filter_count);
      MatMulConvFunctor<Eigen::ThreadPoolDevice, T>()(device, output, input,
                                                      filter, dim_pair);
    } else if (filter_height == input_height && filter_width == input_width &&
               pad_width == 0 && pad_height == 0) {
      // If the input data and filter have the same height/width,
      // the 2D convolution is reduced to matrix multiplication.
      const int k =  // Length of reduction dimension.
          filter_width * filter_height * input_depth;
      Eigen::array<Eigen::IndexPair<Eigen::DenseIndex>, 1> dim_pair;
      dim_pair[0] = Eigen::IndexPair<Eigen::DenseIndex>(1, 0);
      EigenMatrix output(output_data, input_batches, filter_count);
      ConstEigenMatrix input(input_data, input_batches, k);
      ConstEigenMatrix filter(filter_data, k, filter_count);
      MatMulConvFunctor<Eigen::ThreadPoolDevice, T>()(device, output, input,
                                                      filter, dim_pair);
    } else {
      // General case: delegate to Eigen's spatial convolution. Note the
      // (cols, rows) stride ordering expected by SpatialConvolution.
      EigenTensor output(output_data, input_batches, output_height,
                         output_width, filter_count);
      ConstEigenTensor input(input_data, input_batches, input_height,
                             input_width, input_depth);
      ConstEigenTensor filter(filter_data, filter_height, filter_width,
                              input_depth, filter_count);
      output.device(device) =
          Eigen::SpatialConvolution(input, filter, stride_cols, stride_rows,
                                    RuntimePadding2EigenPadding(padding));
    }
  }
};
// Multithreaded float convolution using Eigen's thread-pool device.
// Parameters mirror optimized_ops::Conv. `im2col_data` must be null: this
// path never materializes an im2col buffer (checked below).
inline void Conv(const Eigen::ThreadPoolDevice& device,
                 const ConvParams& params, const RuntimeShape& input_shape,
                 const float* input_data, const RuntimeShape& filter_shape,
                 const float* filter_data, const RuntimeShape& bias_shape,
                 const float* bias_data, const RuntimeShape& output_shape,
                 float* output_data, const RuntimeShape& im2col_shape,
                 float* im2col_data) {
  // Nest profiling under "Conv", to aggregate with other kernels.
  ruy::profiler::ScopeLabel label("Conv");
  ruy::profiler::ScopeLabel inner_label("Multithreaded EigenTensor");
  // im2col data should not be generated for the multi-thread supporting case.
  TFLITE_DCHECK(!im2col_data);
  (void)im2col_shape;
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const PaddingType padding = params.padding_type;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  EigenTensorConvFunctor<float> conv_functor;
  // Fix: the functor's parameter order is (..., stride_rows, stride_cols,
  // pad_width, pad_height, ...); the previous call passed pad_height into
  // the pad_width slot and vice versa. Behavior was unaffected only because
  // the functor merely tests both pads against zero, but the swapped order
  // was misleading and fragile.
  conv_functor(device, input_data, batches, input_height, input_width,
               input_depth, filter_data, filter_height, filter_width,
               output_depth, stride_height, stride_width, pad_width,
               pad_height, padding, output_data, output_height, output_width);
  // Bias addition and activation clamping are fused into a single pass.
  optimized_ops::AddBiasAndEvalActivationFunction(
      output_activation_min, output_activation_max, bias_shape, bias_data,
      output_shape, output_data);
}
} // namespace multithreaded_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_MULTITHREADED_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/multithreaded_conv.h | C++ | apache-2.0 | 8,208 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
#if defined(__ARM_NEON__) || defined(__ARM_NEON)
#define USE_NEON
#include <arm_neon.h>
#endif
#if defined __GNUC__ && defined __SSE4_1__ && !defined TF_LITE_DISABLE_X86_NEON
#define USE_NEON
#include "NEON_2_SSE.h"
#endif
// NEON_OR_PORTABLE(SomeFunc, args) calls NeonSomeFunc(args) if USE_NEON is
// defined, PortableSomeFunc(args) otherwise.
#ifdef USE_NEON
// Always use Neon code
#define NEON_OR_PORTABLE(funcname, ...) Neon##funcname(__VA_ARGS__)
#else
// No NEON available: Use Portable code
#define NEON_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
#endif // defined(USE_NEON)
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_CHECK_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/neon_check.h | C | apache-2.0 | 1,473 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <sys/types.h>
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>
#include <limits>
#include <utility>
#include "ruy/ruy.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_tensor_utils_impl.h"
#ifdef USE_NEON
// aligned_alloc is available (via cstdlib/stdlib.h) with C++17/C11.
#if __cplusplus >= 201703L || __STDC_VERSION__ >= 201112L
#if !defined(__ANDROID__) || __ANDROID_API__ >= 28
// Neither Apple nor Windows provide aligned_alloc.
#if !defined(__APPLE__) && !defined(_WIN32)
#define TFLITE_USE_STD_ALIGNED_ALLOC
#endif
#endif
#endif
// Note: This is the same as ABSL_HAVE_BUILTIN, but can't include the header.
#ifdef __has_builtin
#define TFLITE_HAS_BUILTIN(x) __has_builtin(x)
#else
#define TFLITE_HAS_BUILTIN(x) 0
#endif
// Note: This is the same as ABSL_PREDICT_FALSE, but can't include the header.
#if TFLITE_HAS_BUILTIN(__builtin_expect) || \
(defined(__GNUC__) && !defined(__clang__))
#define TFLITE_UNLIKELY(x) (__builtin_expect(false || (x), false))
#else
#define TFLITE_UNLIKELY(x) (x)
#endif
namespace tflite {
namespace tensor_utils {
namespace {
constexpr int kFloatValuesPerNeonVector = 4;
constexpr int kInt16ValuesPerNeonVector = 8;
constexpr int kInt8ValuesPerNeonVector = 16;
constexpr int kNeonVectorAlignment = 4;
// Rounds `size` down to the nearest multiple of PerNeonSize, i.e. the number
// of elements that can be processed by full NEON vectors; the remainder is
// left for scalar postamble code. The bitmask trick below is only correct
// when PerNeonSize is a power of two, so enforce that at compile time.
template <int PerNeonSize>
inline int RoundDownVectors(int size) {
  static_assert(PerNeonSize > 0 && (PerNeonSize & (PerNeonSize - 1)) == 0,
                "PerNeonSize must be a power of two");
  return size & ~(PerNeonSize - 1);
}
// Allocates, at least, size bytes of uninitialized storage whose alignment
// is specified by alignment.
// The caller is responsible for releasing the memory by calling free() on
// *freeing_buffer (NOT on the returned pointer, which may point into the
// middle of the allocation in the fallback path).
inline void* aligned_alloc(size_t alignment, size_t size,
                           void** freeing_buffer) {
#ifdef TFLITE_USE_STD_ALIGNED_ALLOC
  // C11/C++17 aligned_alloc requires size to be a multiple of alignment,
  // so round the request up before delegating.
  *freeing_buffer = ::aligned_alloc(
      alignment, (size + alignment - 1) / alignment * alignment);
  return *freeing_buffer;
#else
  // Fallback: over-allocate by `alignment` bytes and return the first
  // suitably-aligned address inside the block.
  *freeing_buffer = malloc(size + alignment);
  const size_t offset = ((uintptr_t)*freeing_buffer) % alignment;  // NOLINT
  return offset == 0
             ? *freeing_buffer
             : ((char*)*freeing_buffer + (alignment - offset));  // NOLINT
#endif
}
// Returns whether the CPU supports the Arm dot-product (sdot) extension.
// The hardware probe runs once; the cached answer is reused thereafter.
bool HasSdotInstruction() {
  static const bool kHasDotprod = DetectArmNeonDotprod();
  return kHasDotprod;
}
// Horizontally adds the four float lanes of `lane` into a single scalar.
inline float AccumulateNeonLane(const float32x4_t lane) {
#ifdef __aarch64__
  // A64 provides a single across-vector add instruction.
  return vaddvq_f32(lane);
#else
  // On 32-bit Arm, extract and sum the four lanes individually.
  return vgetq_lane_f32(lane, 0) + vgetq_lane_f32(lane, 1) +
         vgetq_lane_f32(lane, 2) + vgetq_lane_f32(lane, 3);
#endif
}
// Empirically determined heuristic for choosing CpuBackendGemm over the
// custom MatrixBatchVectorMultiplyAccumulate kernels. In short: without
// sdot support, CpuBackendGemm wins once the batch size reaches 8. With
// sdot, it only wins for large batches whose LHS matrix is not extremely
// rectangular.
bool UseCpuBackendGemm(int rows, int cols, int batch) {
  if (!HasSdotInstruction()) {
    return batch >= 8;
  }
  if (batch < 16) {
    return false;
  }
  constexpr int kCpuBackendGemmThreshold = 2;
  // "Rectangularness" measures (in log2) how far from square the LHS matrix
  // is. Integer division gives 0 for the smaller-over-larger ratio, so
  // exactly one of the two ratios below is meaningful.
  const int row_rect = rows / cols;
  const int col_rect = cols / rows;
  const int rect_lg2 =
      (row_rect > 0) ? FloorLog2(row_rect) : FloorLog2(col_rect);
  // Large batches push us over the threshold unless offset by significant
  // rectangularness.
  return FloorLog2(batch) - rect_lg2 > kCpuBackendGemmThreshold;
}
// Horizontally adds the four int32 lanes of `lane` into a single scalar.
inline int32_t AccumulateNeonLane(const int32x4_t lane) {
#ifdef __aarch64__
  return vaddvq_s32(lane);
#else
  // Pairwise-widen to int64 before summing; the final result is then
  // truncated back to int32 by the return.
  int64x2_t pairwiseAdded = vpaddlq_s32(lane);
  return vgetq_lane_s64(pairwiseAdded, 0) + vgetq_lane_s64(pairwiseAdded, 1);
#endif
}
// Applies the standard TFLite quantized-multiplier rescale (saturating
// rounding doubling high-mul followed by a rounding right shift) to two
// int32x4 rows at once. A positive `shift` is applied as a left shift
// before the multiply; a negative `shift` as a rounding right shift after.
inline int32x4x2_t MultiplyByQuantizedMultiplier2Rows(
    int32x4x2_t input_val, int32 quantized_multiplier, int shift) {
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  // Split the signed shift into its left / right components.
  const int left_shift = shift > 0 ? shift : 0;
  const int right_shift = shift > 0 ? 0 : -shift;
  int32x4x2_t result;
// The vector type support for SaturatingRoundingDoublingHighMul in gemmlowp
// is limited to NEON.
#ifdef GEMMLOWP_NEON
  const int32x4_t left_shifted_one_dup = vdupq_n_s32(1 << left_shift);
  result.val[0] =
      RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
                              vmulq_s32(input_val.val[0], left_shifted_one_dup),
                              quantized_multiplier),
                          right_shift);
  result.val[1] =
      RoundingDivideByPOT(SaturatingRoundingDoublingHighMul(
                              vmulq_s32(input_val.val[1], left_shifted_one_dup),
                              quantized_multiplier),
                          right_shift);
#else
  // Scalar fallback: process each of the eight lanes individually.
  for (int i = 0; i < 2; ++i) {
    int32_t vals[4];
    vals[0] = RoundingDivideByPOT(
        SaturatingRoundingDoublingHighMul(
            vgetq_lane_s32(input_val.val[i], 0) * (1 << left_shift),
            quantized_multiplier),
        right_shift);
    vals[1] = RoundingDivideByPOT(
        SaturatingRoundingDoublingHighMul(
            vgetq_lane_s32(input_val.val[i], 1) * (1 << left_shift),
            quantized_multiplier),
        right_shift);
    vals[2] = RoundingDivideByPOT(
        SaturatingRoundingDoublingHighMul(
            vgetq_lane_s32(input_val.val[i], 2) * (1 << left_shift),
            quantized_multiplier),
        right_shift);
    vals[3] = RoundingDivideByPOT(
        SaturatingRoundingDoublingHighMul(
            vgetq_lane_s32(input_val.val[i], 3) * (1 << left_shift),
            quantized_multiplier),
        right_shift);
    result.val[i] = vld1q_s32(reinterpret_cast<int32_t*>(&vals));
  }
#endif
  return result;
}
} // namespace
void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
int m_cols, const float* vector,
int n_batch, float* result) {
// If v_size is not divisible by the vector size, then we need to process the
// final few elements sequentially. postamble_start shows the start index
// where this should happen.
const int postamble_start =
RoundDownVectors<kFloatValuesPerNeonVector>(m_cols);
for (int b = 0; b < n_batch; b++) {
float* result_in_batch = result + b * m_rows;
const float* vector_in_batch = vector + b * m_cols;
const float* matrix_row = matrix;
// Main matrix by vector multiplication loop
for (int r = 0; r < m_rows; r++) {
float32x4_t acc_32x4 = vmovq_n_f32(0.0);
int c = 0;
for (; c < postamble_start; c += kFloatValuesPerNeonVector) {
// Load 4 float values from vector and matrix row.
float32x4_t vector_f32x4 = vld1q_f32(vector_in_batch + c);
float32x4_t matrix_f32x4 = vld1q_f32(matrix_row + c);
// Multiply the vector and matrix row and add to accumulator.
acc_32x4 = vmlaq_f32(acc_32x4, matrix_f32x4, vector_f32x4);
}
// Add the 4 intermediate sum values to get the final dot-prod value for
// this column.
*result_in_batch += AccumulateNeonLane(acc_32x4);
for (; TFLITE_UNLIKELY(c < m_cols); c++) {
*result_in_batch += matrix_row[c] * vector_in_batch[c];
}
matrix_row += m_cols;
++result_in_batch;
}
}
}
#ifdef __aarch64__
// We interleave vector data to make the dot product logic more efficient.
// Suppose that vectors is:
// a0 a1 a2 a3 a4 a5 ...
// b0 b1 b2 b3 b4 b5 ...
// c0 c1 c2 c3 c4 c5 ...
// d0 d1 d2 d3 d4 d5 ...
// e0 e1 e2 e3 e4 e5 ...
// This code interleaves them like this:
// a0 a1 a2 a3 b0 b1 b2 b3 c0 c1 c2 c3 d0 d1 d2 d3 a4 a5 a6 a7 b4 ...
// e0 e1 e2 e3 f0 f1 f2 f3 ...
// Once the data is interleaved, each 16-byte read from the vectors pointer
// contains 4 bytes from each of 4 vectors.
// Copies `vectors` (n_batch rows of m_cols int8 values) into a freshly
// allocated, aligned buffer with groups of four rows interleaved in 4-byte
// chunks (see the diagram above). The caller must free(*shuffled_vectors_free)
// when done. NOTE(review): the asm reads rows i..i+3 and 16 bytes at a time,
// so callers are expected to supply n_batch as a multiple of 4 and m_cols as
// a multiple of 16 -- confirm against call sites.
const int8_t* ShuffleVectors(const int8_t* vectors, const int n_batch,
                             const int m_cols, void** shuffled_vectors_free) {
  int8* shuffled_vectors = reinterpret_cast<int8*>(aligned_alloc(
      kNeonVectorAlignment, n_batch * m_cols, shuffled_vectors_free));
  for (int i = 0; i < n_batch; i += 4) {
    // Set up one write pointer and four read pointers (one per row).
    int8* shuffled_vectors_ptr = shuffled_vectors + (i * m_cols);
    const int8* unshuffled_vec0_ptr =
        reinterpret_cast<const int8*>(vectors) + (i * m_cols);
    const int8* unshuffled_vec1_ptr =
        reinterpret_cast<const int8*>(vectors) + ((i + 1) * m_cols);
    const int8* unshuffled_vec2_ptr =
        reinterpret_cast<const int8*>(vectors) + ((i + 2) * m_cols);
    const int8* unshuffled_vec3_ptr =
        reinterpret_cast<const int8*>(vectors) + ((i + 3) * m_cols);
    const int8* const end_vec0_ptr = unshuffled_vec1_ptr;
    while (unshuffled_vec0_ptr != end_vec0_ptr) {
      asm volatile(
          // This code path requires that (n_cols % 16) == 0 so we can safely
          // read in 16-byte chunks from each row.
          "ld1 {v0.16b}, [%[unshuffled_vec0_ptr]], #16\n"
          "ld1 {v1.16b}, [%[unshuffled_vec1_ptr]], #16\n"
          "ld1 {v2.16b}, [%[unshuffled_vec2_ptr]], #16\n"
          "ld1 {v3.16b}, [%[unshuffled_vec3_ptr]], #16\n"
          // st4 with .s lanes interleaves the four rows in 4-byte units.
          "st4 {v0.s, v1.s, v2.s, v3.s}[0], [%[shuffled_vectors_ptr]], #16\n"
          "st4 {v0.s, v1.s, v2.s, v3.s}[1], [%[shuffled_vectors_ptr]], #16\n"
          "st4 {v0.s, v1.s, v2.s, v3.s}[2], [%[shuffled_vectors_ptr]], #16\n"
          "st4 {v0.s, v1.s, v2.s, v3.s}[3], [%[shuffled_vectors_ptr]], #16\n"
          : [unshuffled_vec0_ptr] "+r"(unshuffled_vec0_ptr),
            [unshuffled_vec1_ptr] "+r"(unshuffled_vec1_ptr),
            [unshuffled_vec2_ptr] "+r"(unshuffled_vec2_ptr),
            [unshuffled_vec3_ptr] "+r"(unshuffled_vec3_ptr),
            [shuffled_vectors_ptr] "+r"(shuffled_vectors_ptr)
          :
          : "v0", "v1", "v2", "v3", "cc", "memory");
    }
  }
  return reinterpret_cast<const int8_t*>(shuffled_vectors);
}
// Notes about the speed of this version vs. the baseline (from memory):
// - With 256K of L1, we can keep a lot of vectors in cache.
// I recall a reasonable speedup just by rearranging the loop to have
// row on the outside and batch on the inside.
// - I also recall getting a nice speedup from sdot.
// - I tried many times to do better than the current implementation, using
// loop unrolling and instruction reordering to avoid stalls, etc.
// but I was not able to do significantly better. This code is, however,
// much worse than what the processor spec sheet suggests is possible.
// Accumulates result += scaling_factor[batch] * (matrix row . vector[batch])
// using the sdot instruction, processing 2 matrix rows x 4 batch vectors per
// asm invocation. The input vectors are first re-interleaved by
// ShuffleVectors so each 16-byte load supplies 4 bytes from each of 4
// vectors. NOTE(review): the loop structure reads rows in pairs and batches
// in fours, so m_rows even, n_batch % 4 == 0 and m_cols % 16 == 0 are
// required -- the padded wrapper below guarantees the batch condition.
static void DotprodMatrixBatchFourVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* vectors, const float* scaling_factors, int n_batch,
    float* __restrict__ result) {
  void* shuffled_vectors_free;
  const int8_t* shuffled_vectors =
      ShuffleVectors(vectors, n_batch, m_cols, &shuffled_vectors_free);
  for (int row = 0; row < m_rows; row += 2) {
    for (int batch = 0; batch < n_batch; batch += 4) {
      float* result_ptr = result + (batch * m_rows) + row;
      const int8* mat_ptr0 = matrix + (row * m_cols);
      const int8* mat_ptr1 = matrix + ((row + 1) * m_cols);
      const int8* mat_ptr0_end = mat_ptr1;
      const int8* vec_ptr = shuffled_vectors + (batch * m_cols);
      const float* scaling_factors_ptr = scaling_factors + batch;
      // Stride (in bytes) between consecutive batches of one output row.
      const uint64_t wide_rows = m_rows * sizeof(float);
      // Pointers used only for prefetching the next pair of rows.
      const int8* mat_ptr2 = matrix + ((row + 2) * m_cols);
      const int8* mat_ptr3 = matrix + ((row + 3) * m_cols);
      asm volatile(
          // Zero out the accumulator registers.
          "movi v0.4s, #0\n"
          "movi v1.4s, #0\n"
          "movi v2.4s, #0\n"
          "movi v3.4s, #0\n"
          "1:\n"  // batch_cols_loop
          // Read 16 more bytes from a pair of matrix rows.
          "ld1 {v12.16b}, [%[mat_ptr0]], #16\n"
          // Prefetch two rows ahead.
          "prfm pldl1strm, [%[mat_ptr2]]\n"
          "prfm pldl1strm, [%[mat_ptr3]]\n"
          // Read from input vectors 4 times; 64 bytes total.
          // Each 16-byte register contains parts of 4 vectors; see the
          // shuffle logic above.
          // From Benoit, places to look in the future:
          // - Move load instructions further from sdot
          // - Switch loop use-then-reload
          // - Do partial unrolling to use register space better
          "ld1 {v8.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4f8ce100  // sdot v0.4s, v8.16b, v12.4b[0]\n"
          "ld1 {v9.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4face121  // sdot v1.4s, v9.16b, v12.4b[1]\n"
          "ld1 {v10.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4f8ce940  // sdot v0.4s, v10.16b, v12.4b[2]\n"
          "ld1 {v11.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4face961  // sdot v1.4s, v11.16b, v12.4b[3]\n"
          // Update prefetch pointers.
          "add %[mat_ptr2], %[mat_ptr2], #16\n"
          "add %[mat_ptr3], %[mat_ptr3], #16\n"
          // Re-use those vectors for the next row as well.
          "ld1 {v13.16b}, [%[mat_ptr1]], #16\n"
          ".word 0x4f8de102  // sdot v2.4s, v8.16b, v13.4b[0]\n"
          ".word 0x4fade123  // sdot v3.4s, v9.16b, v13.4b[1]\n"
          ".word 0x4f8de942  // sdot v2.4s, v10.16b, v13.4b[2]\n"
          ".word 0x4fade963  // sdot v3.4s, v11.16b, v13.4b[3]\n"
          // If we're not done with these rows, continue.
          "cmp %[mat_ptr0], %[mat_ptr0_end]\n"
          "bne 1b\n"  // batch_cols_loop
          // Done with the rows, sum the results.
          "add v0.4s, v0.4s, v1.4s\n"
          "add v2.4s, v2.4s, v3.4s\n"
          // Convert the per-vector sums to floating point.
          "scvtf v0.4s, v0.4s\n"
          "scvtf v1.4s, v2.4s\n"
          // Fetch scale factors.
          "ld1 {v4.4s}, [%[scaling_factors_ptr]]\n"
          // Multiply scale factors times sums.
          "fmul v0.4s, v4.4s, v0.4s\n"
          "fmul v1.4s, v4.4s, v1.4s\n"
          // Load previous result values.
          // The result position is:
          //   result[batch * m_rows + row]
          // Here that is factored into:
          //   result_ptr = result + row
          //   *result_ptr = res[0]
          //   (uint8*)result_ptr += (m_rows * sizeof(float))
          //   *result_ptr = res[1]
          //   ...
          // Since we're reading two rows at a time, though, we read both
          //   result[batch * m_rows + row]
          // and
          //   result[batch * m_rows + row + 1]
          "ld2 {v9.s, v10.s}[0], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[1], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[2], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[3], [%[result_ptr]], %[wide_rows]\n"
          // Go back to the starting position (subtract wide_rows * 4).
          "sub %[result_ptr], %[result_ptr], %[wide_rows], lsl #2\n"
          // Add previous result values.
          "fadd v9.4s, v9.4s, v0.4s\n"
          "fadd v10.4s, v10.4s, v1.4s\n"
          // Store results.
          "st2 {v9.s, v10.s}[0], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[1], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[2], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[3], [%[result_ptr]], %[wide_rows]\n"
          : [mat_ptr0] "+r"(mat_ptr0), [mat_ptr1] "+r"(mat_ptr1),
            [vec_ptr] "+r"(vec_ptr), [result_ptr] "+r"(result_ptr),
            [mat_ptr2] "+r"(mat_ptr2), [mat_ptr3] "+r"(mat_ptr3)
          : [mat_ptr0_end] "r"(mat_ptr0_end),
            [scaling_factors_ptr] "r"(scaling_factors_ptr),
            [wide_rows] "r"(wide_rows)
          : "x0", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
            "v10", "v11", "v12", "v13", "cc", "memory");
    }
  }
  free(shuffled_vectors_free);
}
// Variant of the kernel above supporting asymmetric quantization:
// per-channel scales (multiplied into the batch scaling factors when
// non-null), per-batch input zero points (`input_offset`), and optionally
// precomputed row sums. When `row_sums` is null the row sums are computed
// on the fly inside the asm loop; the zero-point correction
// row_sum * input_offset is subtracted from each accumulator before
// scaling. Same shape requirements as the base kernel.
static void DotprodMatrixBatchFourVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* vectors, const float* scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* row_sums) {
  void* shuffled_vectors_free;
  const int8_t* shuffled_vectors =
      ShuffleVectors(vectors, n_batch, m_cols, &shuffled_vectors_free);
  for (int row = 0; row < m_rows; row += 2) {
    for (int batch = 0; batch < n_batch; batch += 4) {
      const float* channel_scales_ptr = per_channel_scale + row;
      int32_t* row_sums_ptr = row_sums ? row_sums + row : nullptr;
      float* result_ptr = result + (batch * m_rows) + row;
      const int8* mat_ptr0 = matrix + (row * m_cols);
      const int8* mat_ptr1 = matrix + ((row + 1) * m_cols);
      const int8* mat_ptr0_end = mat_ptr1;
      const int8* vec_ptr = shuffled_vectors + (batch * m_cols);
      const float* scaling_factors_ptr = scaling_factors + batch;
      const uint64_t wide_rows = m_rows * sizeof(float);
      const int32_t* batch_offsets_ptr = input_offset + batch;
      // Flags telling the asm which optional inputs are present.
      const int32_t is_channel_scale_nullptr = per_channel_scale == nullptr;
      const int32_t is_row_sums_nullptr = row_sums_ptr == nullptr;
      asm volatile(
          // Zero out the dot-product accumulators.
          "movi v0.4s, #0\n"
          "movi v1.4s, #0\n"
          "movi v2.4s, #0\n"
          "movi v3.4s, #0\n"
          // Load zero points.
          "ld1 {v7.4s}, [%[batch_offsets_ptr]]\n"
          "ld1 {v4.4s}, [%[scaling_factors_ptr]]\n"
          // Zero out zero point accumulators.
          "movi v14.4s, #0\n"
          "movi v15.4s, #0\n"
          // Load per channel scales if not null, folding them into the
          // batch scaling factors (v16/v17 hold the effective scales for
          // the two rows).
          "cmp %w[is_channel_scale_nullptr], #0\n"
          "bne 1f\n"
          "ld1r {v16.4s}, [%[channel_scales_ptr]], #4\n"
          "ld1r {v17.4s}, [%[channel_scales_ptr]]\n"
          "fmul v16.4s, v16.4s, v4.4s\n"
          "fmul v17.4s, v17.4s, v4.4s\n"
          "b 2f\n"
          "1:\n"
          "mov v16.16b, v4.16b\n"
          "mov v17.16b, v4.16b\n"
          "2:\n"
          // Main columns loop: same sdot pattern as the base kernel.
          "ld1 {v12.16b}, [%[mat_ptr0]], #16\n"
          "ld1 {v8.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4f8ce100  // sdot v0.4s, v8.16b, v12.4b[0]\n"
          "ld1 {v9.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4face121  // sdot v1.4s, v9.16b, v12.4b[1]\n"
          "ld1 {v10.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4f8ce940  // sdot v0.4s, v10.16b, v12.4b[2]\n"
          "ld1 {v11.16b}, [%[vec_ptr]], #16\n"
          ".word 0x4face961  // sdot v1.4s, v11.16b, v12.4b[3]\n"
          "ld1 {v13.16b}, [%[mat_ptr1]], #16\n"
          ".word 0x4f8de102  // sdot v2.4s, v8.16b, v13.4b[0]\n"
          ".word 0x4fade123  // sdot v3.4s, v9.16b, v13.4b[1]\n"
          ".word 0x4f8de942  // sdot v2.4s, v10.16b, v13.4b[2]\n"
          ".word 0x4fade963  // sdot v3.4s, v11.16b, v13.4b[3]\n"
          "cmp %w[is_row_sums_nullptr], #1\n"
          "bne 3f\n"
          // Accumulate row_sums for zero point calculations.
          "saddlp v12.8h, v12.16b\n"
          "saddlp v13.8h, v13.16b\n"
          "sadalp v14.4s, v12.8h\n"
          "sadalp v15.4s, v13.8h\n"
          "3:\n"
          "cmp %[mat_ptr0], %[mat_ptr0_end]\n"
          "bne 2b\n"
          "add v0.4s, v0.4s, v1.4s\n"
          "add v2.4s, v2.4s, v3.4s\n"
          "cmp %w[is_row_sums_nullptr], #1\n"
          "bne 4f\n"
          // Calculate zero point offsets from the on-the-fly row sums.
          "addv s14, v14.4s\n"
          "addv s15, v15.4s\n"
          "dup v14.4s, v14.s[0]\n"
          "dup v15.4s, v15.s[0]\n"
          "b 5f\n"
          "4:\n"
          // Or load the precomputed row sums for the two rows.
          "ld1r {v14.4s}, [%[row_sums_ptr]], #4\n"
          "ld1r {v15.4s}, [%[row_sums_ptr]]\n"
          "5:\n"
          // Subtract row_sum * input_offset from each accumulator.
          "mul v14.4s, v14.4s, v7.4s\n"
          "mul v15.4s, v15.4s, v7.4s\n"
          "sub v0.4s, v0.4s, v14.4s\n"
          "sub v2.4s, v2.4s, v15.4s\n"
          "scvtf v0.4s, v0.4s\n"
          "scvtf v1.4s, v2.4s\n"
          // Multiply scale.
          "fmul v0.4s, v16.4s, v0.4s\n"
          "fmul v1.4s, v17.4s, v1.4s\n"
          // Read-modify-write the strided output, as in the base kernel.
          "ld2 {v9.s, v10.s}[0], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[1], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[2], [%[result_ptr]], %[wide_rows]\n"
          "ld2 {v9.s, v10.s}[3], [%[result_ptr]], %[wide_rows]\n"
          "sub %[result_ptr], %[result_ptr], %[wide_rows], lsl #2\n"
          "fadd v9.4s, v9.4s, v0.4s\n"
          "fadd v10.4s, v10.4s, v1.4s\n"
          "st2 {v9.s, v10.s}[0], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[1], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[2], [%[result_ptr]], %[wide_rows]\n"
          "st2 {v9.s, v10.s}[3], [%[result_ptr]], %[wide_rows]\n"
          : [mat_ptr0] "+r"(mat_ptr0), [mat_ptr1] "+r"(mat_ptr1),
            [vec_ptr] "+r"(vec_ptr), [result_ptr] "+r"(result_ptr),
            [row_sums_ptr] "+r"(row_sums_ptr),
            [channel_scales_ptr] "+r"(channel_scales_ptr)
          : [mat_ptr0_end] "r"(mat_ptr0_end),
            [scaling_factors_ptr] "r"(scaling_factors_ptr),
            [wide_rows] "r"(wide_rows),
            [batch_offsets_ptr] "r"(batch_offsets_ptr),
            [is_channel_scale_nullptr] "r"(is_channel_scale_nullptr),
            [is_row_sums_nullptr] "r"(is_row_sums_nullptr)
          : "x0", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9",
            "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "w0", "w1",
            "cc", "memory");
    }
  }
  free(shuffled_vectors_free);
}
// Convenience overload: per-channel variant without precomputed row sums.
// Passing nullptr makes the kernel compute the row sums on the fly.
static void DotprodMatrixBatchFourVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* vectors, const float* scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset) {
  DotprodMatrixBatchFourVectorMultiplyAccumulate(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      per_channel_scale, input_offset, nullptr);
}
// The DotprodMatrixBatchFourVectorMultiplyAccumulate kernel processes 4
// vectors in the same time as the baseline processes 1 vector. However, it
// requires 4 vectors of input.
//
// To take advantage of this speed difference, we add some zero-valued
// vectors to the batch so that n_batch is a multiple of 4. Then we execute
// DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate on that padded batch,
// then extract just the results we want at the end (ignoring the extra padding
// outputs).
//
// The relative cost of the padding is large when the matrix is smaller than
// 128x128, so we don't use this code path on small matrices. On larger
// matrices, the computation cost dwarfs the padding cost, making this code
// viable.
//
// If we ignore the cost of padding, this kernel is:
// 1x the speed of NeonMatrixBatchVectorMultiplyImpl for n_batch = 1
// 2x the speed of NeonMatrixBatchVectorMultiplyImpl for n_batch = 2
// 3x the speed of NeonMatrixBatchVectorMultiplyImpl for n_batch = 3
// ...
//
// We don't use this kernel when n_batch = 1 because the baseline kernel
// is fine for that case.
// Pads n_batch up to a multiple of 4 (zero-filling vectors, scaling factors,
// offsets, and the accumulated results), runs the 4-at-a-time dotprod kernel
// on the padded buffers, then copies back only the first n_batch results.
// See the rationale in the comment block above.
void DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* vectors, const float* scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* row_sums) {
  // Round to the nearest multiple of 4.
  int batch_round_up = n_batch;
  if (n_batch % 4 != 0) {
    batch_round_up += (4 - n_batch % 4);
  }
  TFLITE_CHECK_LE(n_batch, batch_round_up);

  // Zero-padded copy of the input vectors.
  void* padded_vectors_free;
  const int padded_vectors_size = batch_round_up * m_cols;
  int8_t* padded_vectors = reinterpret_cast<int8_t*>(aligned_alloc(
      kNeonVectorAlignment, padded_vectors_size, &padded_vectors_free));
  memset(padded_vectors, 0, padded_vectors_size);

  // Padded result buffer: existing results are copied in because the kernel
  // accumulates into (rather than overwrites) its output.
  void* padded_result_free;
  const int result_size = n_batch * m_rows * sizeof(float);
  const int padded_result_size = batch_round_up * m_rows * sizeof(float);
  float* padded_result = reinterpret_cast<float*>(aligned_alloc(
      kNeonVectorAlignment, padded_result_size, &padded_result_free));
  memcpy(padded_result, result, result_size);
  memset(reinterpret_cast<char*>(padded_result) + result_size, 0,
         padded_result_size - result_size);

  // Copy the input into the padded data structure.
  TFLITE_CHECK_LE(n_batch * m_cols, padded_vectors_size);
  memcpy(padded_vectors, vectors, n_batch * m_cols);

  // Zero-padded copy of the per-batch scaling factors.
  void* padded_scaling_factors_free;
  const int padded_scaling_factors_size = batch_round_up * sizeof(float);
  float* padded_scaling_factors = reinterpret_cast<float*>(
      aligned_alloc(kNeonVectorAlignment, padded_scaling_factors_size,
                    &padded_scaling_factors_free));
  TFLITE_CHECK_LE(n_batch * sizeof(float), padded_scaling_factors_size);
  TFLITE_CHECK_LE(batch_round_up * sizeof(float), padded_scaling_factors_size);
  memset(padded_scaling_factors, 0, batch_round_up * sizeof(float));
  memcpy(padded_scaling_factors, scaling_factors, n_batch * sizeof(float));

  if (input_offset != nullptr) {
    // Asymmetric path additionally needs padded per-batch zero points.
    void* padded_input_offset_free;
    const int padded_input_offset_size = batch_round_up * sizeof(int32_t);
    int32_t* padded_input_offset = reinterpret_cast<int32_t*>(
        aligned_alloc(kNeonVectorAlignment, padded_input_offset_size,
                      &padded_input_offset_free));
    TFLITE_CHECK_LE(n_batch * sizeof(int32_t), padded_input_offset_size);
    TFLITE_CHECK_LE(batch_round_up * sizeof(int32_t), padded_input_offset_size);
    memset(padded_input_offset, 0, batch_round_up * sizeof(int32_t));
    memcpy(padded_input_offset, input_offset, n_batch * sizeof(int32_t));

    // Call the main kernel.
    DotprodMatrixBatchFourVectorMultiplyAccumulate(
        matrix, m_rows, m_cols, padded_vectors, padded_scaling_factors,
        batch_round_up, padded_result, per_channel_scale, padded_input_offset,
        row_sums);

    free(padded_input_offset_free);
  } else {
    // Call the main kernel.
    DotprodMatrixBatchFourVectorMultiplyAccumulate(
        matrix, m_rows, m_cols, padded_vectors, padded_scaling_factors,
        batch_round_up, padded_result);
  }
  // Copy back only the caller's n_batch results; drop the padding rows.
  memcpy(result, padded_result, result_size);

  free(padded_result_free);
  free(padded_vectors_free);
  free(padded_scaling_factors_free);
}
// Symmetric-quantization overload: forwards to the full variant with the
// per-channel scaling, input-offset and row-sum features disabled.
void DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* vectors, const float* scaling_factors, int n_batch,
    float* __restrict__ result) {
  const float* per_channel_scale = nullptr;
  const int32_t* input_offset = nullptr;
  int32_t* row_sums = nullptr;
  DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      per_channel_scale, input_offset, row_sums);
}
// Sparse int8 matrix times batched int8 vector multiply-accumulate using the
// Aarch64 sdot instruction. For each row, |ledger| holds the count of
// nonzero 16-byte chunks followed by that many chunk indices; only those
// chunks of the vector are loaded (at vec_ptr + index * 16) and dotted
// against the densely packed matrix data. The per-row sum is scaled by the
// batch scaling factor and accumulated into |result|.
static void DotprodSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result) {
  const uint8_t* ledger_ptr = ledger;
  const int8* mat_ptr = matrix;
  for (int row = 0; row < m_rows; row++) {
    // First ledger byte for the row is the nonzero-chunk count; the chunk
    // indices follow.
    int num_nonzero_chunks = *ledger_ptr;
    ledger_ptr++;
    const uint8* ledger_start = ledger_ptr;
    const uint8* ledger_end = ledger_ptr + num_nonzero_chunks;
    const int8* mat_start = mat_ptr;
    for (int batch = 0; batch < n_batch; batch++) {
      const int8* vec_ptr = vectors + (batch * m_cols);
      int64_t row_sum = 0;
      // Re-walk the same row chunks for every batch.
      mat_ptr = mat_start;
      ledger_ptr = ledger_start;
      if (ledger_ptr != ledger_end) {
        asm volatile(
            "movi v0.4s, #0\n"
            "movi v1.4s, #0\n"
            "movi v8.4s, #0\n"
            "mov x7, 0\n"
            "1:\n"  // chunks_loop
            // Single matrix chunk, 16 bytes
            "ld1 {v8.16b}, [%[mat_ptr]], #16\n"
            // Read the next ledger index and increment.
            "ldrb w7, [%[ledger_ptr]], #1\n"
            // Read 16 bytes of vector data from (vec_ptr + (ledger_index * 16))
            "add x8, %[vec_ptr], x7, lsl #4\n"
            "ld1 {v9.16b}, [x8]\n"
            // Dot product of matrix row and vector.
            ".word 0x4e889520  // sdot v0.4s, v9.16b, v8.16b\n"
            "cmp %[ledger_ptr], %[ledger_end]\n"
            "blt 1b\n"  // chunks_loop
            // Sum the 4 vector components into a 32-bit value.
            "addv s1, v0.4s\n"
            // row_sum is 64-bit, so we copy 64 bits of v1 into it.
            // We have to be careful to cast this value to 32 bits in order
            // to interpret the sign bit properly.
            "mov %[row_sum], v1.d[0]\n"
            : [row_sum] "=r"(row_sum), [ledger_ptr] "+r"(ledger_ptr),
              [mat_ptr] "+r"(mat_ptr), [vec_ptr] "+r"(vec_ptr)
            : [ledger_end] "r"(ledger_end)
            : "x0", "x1", "x7", "x8", "v0", "v1", "v8", "v9", "cc", "memory");
      }
      // Truncate to 32 bits before widening so the sign bit of the 32-bit
      // accumulator is interpreted correctly (see asm comment above).
      result[batch * m_rows + row] +=
          static_cast<int32>(row_sum) * scaling_factors[batch];
    }
  }
}
#endif // __aarch64__
// Computes scratch[batch * n_output + row] =
//     dot(input row for |batch|, weights row |row|) + bias[row]
// for int8 weights of shape (n_output x n_input) and n_batch int8 inputs,
// leaving the raw int32 accumulators in |scratch|. |output_zp| is not used
// here; it is applied later by NeonMatrixBatchVectorAccumulateImpl.
void NeonMatrixBatchVectorMultiplyImpl(const int8_t* input, const int32_t* bias,
                                       const int8_t* input_to_gate_weights,
                                       int32_t n_batch, int32_t n_input,
                                       int32_t n_output, int32_t output_zp,
                                       int32_t* scratch) {
  // Assuming *matrix is kNeonVectorAlignment-byte aligned, every row of the
  // matrix is also kNeonVectorAlignment-byte aligned as long as cols is a
  // multiple of kNeonVectorAlignment. The assumption is currently satisfied by
  // TFLite's 16-byte memory alignment scheme.
  //
  // Otherwise, we allocate an aligned memory block and set
  // a flag to later copy rows from matrix to the block
  // for aligned multiplication.
  bool unaligned = false;
  int8_t* aligned_row = nullptr;
  void* aligned_row_free = nullptr;
  if ((n_input & (kNeonVectorAlignment - 1)) != 0) {
    unaligned = true;
    aligned_row =
        (int8_t*)aligned_alloc(kNeonVectorAlignment, n_input,  // NOLINT
                               &aligned_row_free);
  }
  void* aligned_vec_free = nullptr;
  int8_t* aligned_vec =
      (int8_t*)aligned_alloc(kNeonVectorAlignment, n_input,  // NOLINT
                             &aligned_vec_free);
  // If m_cols is not at least kInt8ValuesPerNeonVector, we cannot use the main
  // vectorized loop, and we need to process sequentially. postamble_half_start
  // shows the start index where this should happen. Between postamble_start and
  // postamble_half_start we can still process kInt8ValuesPerNeonVector/2 in a
  // vectorized form.
  const int postamble_half_start =
      RoundDownVectors<kInt8ValuesPerNeonVector>(n_input);
  const int postamble_start =
      RoundDownVectors<(kInt8ValuesPerNeonVector / 2)>(n_input);
  for (int batch = 0; batch < n_batch; ++batch) {
    // Copy the vector data to an aligned vector.
    memcpy(aligned_vec, input + batch * n_input, sizeof(int8_t) * n_input);
    // Compute dot-product for every column.
    for (int row = 0; row < n_output; ++row) {
      // Get the address of the first element of the row.
      int8_t* row_ptr =
          (int8_t*)input_to_gate_weights + row * n_input;  // NOLINT
      if (unaligned) {
        memcpy(aligned_row, row_ptr, sizeof(int8_t) * n_input);
        row_ptr = aligned_row;
      }
      // Initialize the dot product sum for the row to 0.
      int32x4_t dotprod_32x4 = vmovq_n_s32(0);
      // For every block of 16 8-bit elements.
      int col = 0;
      for (; col < postamble_half_start; col += kInt8ValuesPerNeonVector) {
        // Load 16 8-bit values from the row and vector, each, to operate on.
        // Here the assumption is that each buffer is 4-byte aligned. Otherwise,
        // performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x16_t s1_8x16 = vld1q_s8((const int8_t*)(aligned_vec + col));
        const int8x16_t s2_8x16 = vld1q_s8((const int8_t*)(row_ptr + col));
        // Multiply the low bits (i.e. the lower 8 8bit numbers in the
        // registers).
        int16x8_t prod_16x8 =
            vmull_s8(vget_low_s8(s1_8x16), vget_low_s8(s2_8x16));
        // Multiply the high bits (i.e. the higher 8 8bit numbers in the
        // registers), and accumulate with the result of the low bits product.
        // The assumption here is that overflow will not happen as we quantize
        // our values to be in the range [-127, 127]. As such the sum of the 2
        // products is always strictly smaller than 15-bits (32767 in absolute
        // value).
        prod_16x8 =
            vmlal_s8(prod_16x8, vget_high_s8(s1_8x16), vget_high_s8(s2_8x16));
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
      }  // for col
      // Half iteration dealing only 8 elements
      if (TFLITE_UNLIKELY(col < postamble_start)) {
        // Load 8 8-bit values from the row and column each to operate on.
        // Here the assumption is that each buffer is 4-bytes aligned.
        // Otherwise, performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x8_t s1_8x8 = vld1_s8((const int8_t*)(aligned_vec + col));
        const int8x8_t s2_8x8 = vld1_s8((const int8_t*)(row_ptr + col));
        const int16x8_t prod_16x8 = vmull_s8(s1_8x8, s2_8x8);
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
        col += (kInt8ValuesPerNeonVector >> 1);
      }
      // Add the 4 intermediate sum values to get the final dot-prod value for
      // this row.
      int32_t dotprod = AccumulateNeonLane(dotprod_32x4);
      // Postamble loop.
      for (; TFLITE_UNLIKELY(col < n_input); ++col) {
        dotprod += row_ptr[col] * aligned_vec[col];
      }  // for col
      // Add the bias and store the raw accumulator; requantization happens
      // in a separate pass.
      dotprod += bias[row];
      scratch[batch * n_output + row] = dotprod;
    }  // for row
  }  // for batch
  if (unaligned) {
    free(aligned_row_free);
  }
  free(aligned_vec_free);
}
// Requantizes the int32 accumulators in |scratch| with (multiplier, shift),
// adds |output_zp| and the existing int16 values in |output|, clamps to the
// int16 range and writes back to |output|. The main loop handles 8 values
// per iteration; the tail is processed with scalar code.
inline void NeonMatrixBatchVectorAccumulateImpl(
    int32_t multiplier, int32_t shift, int32_t n_batch, int32_t n_output,
    int32_t output_zp, int32_t* scratch, int16_t* output) {
  int i = 0;
  const int total_size = n_batch * n_output;
  const int32_t output_min = std::numeric_limits<int16_t>::min();
  const int32_t output_max = std::numeric_limits<int16_t>::max();
  const int32x4_t output_zp_dup = vdupq_n_s32(output_zp);
  const int32x4_t max_val_dup = vdupq_n_s32(output_max);
  const int32x4_t min_val_dup = vdupq_n_s32(output_min);
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  for (; i <= total_size - 8; i += 8) {
    // Load 8 accumulators and the 8 existing output values (widened to
    // int32 halves) for this chunk.
    int32x4x2_t scratch_val;
    scratch_val.val[0] = vld1q_s32(scratch + i);
    scratch_val.val[1] = vld1q_s32(scratch + i + 4);
    const int16x8_t output_val = vld1q_s16(output + i);
    const int32x4_t first_half = vmovl_s16(vget_low_s16(output_val));
    const int32x4_t second_half = vmovl_s16(vget_high_s16(output_val));
    // Requantize, then add the prior output and the zero point.
    int32x4x2_t temp_val =
        MultiplyByQuantizedMultiplier2Rows(scratch_val, multiplier, shift);
    temp_val.val[0] =
        vaddq_s32(vaddq_s32(temp_val.val[0], first_half), output_zp_dup);
    temp_val.val[1] =
        vaddq_s32(vaddq_s32(temp_val.val[1], second_half), output_zp_dup);
    // Clamp to the int16 range before narrowing.
    temp_val.val[0] =
        vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);
    temp_val.val[1] =
        vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);
    const int16x8_t result =
        vcombine_s16(vqmovn_s32(temp_val.val[0]), vqmovn_s32(temp_val.val[1]));
    vst1q_s16(output + i, result);
  }
  // Scalar tail: same requantize/add/clamp sequence for the remainder.
  for (; TFLITE_UNLIKELY(i < total_size); ++i) {
    int32_t temp = MultiplyByQuantizedMultiplier(scratch[i], multiplier, shift);
    temp += output_zp;
    temp += output[i];
    if (temp > output_max) {
      temp = output_max;
    }
    if (temp < output_min) {
      temp = output_min;
    }
    output[i] = static_cast<int16_t>(temp);
  }
}
// int8 variant of the accumulate pass: requantizes the int32 accumulators in
// |scratch| with (multiplier, shift), adds |output_zp| and the existing int8
// values in |output|, clamps to the int8 range and writes back. The main
// loop processes 16 values per iteration; the tail is scalar.
inline void NeonMatrixBatchVectorAccumulateImpl(
    int32_t multiplier, int32_t shift, int32_t n_batch, int32_t n_output,
    int32_t output_zp, int32_t* scratch, int8_t* output) {
  int i = 0;
  const int total_size = n_batch * n_output;
  const int32_t output_min = std::numeric_limits<int8_t>::min();
  const int32_t output_max = std::numeric_limits<int8_t>::max();
  const int32x4_t output_zp_dup = vdupq_n_s32(output_zp);
  const int32x4_t max_val_dup = vdupq_n_s32(output_max);
  const int32x4_t min_val_dup = vdupq_n_s32(output_min);
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  for (; i <= total_size - 16; i += 16) {
    // Load 16 accumulators and widen the 16 existing int8 outputs to four
    // int32x4 registers.
    int32x4x4_t scratch_val;
    scratch_val.val[0] = vld1q_s32(scratch + i);
    scratch_val.val[1] = vld1q_s32(scratch + i + 4);
    scratch_val.val[2] = vld1q_s32(scratch + i + 8);
    scratch_val.val[3] = vld1q_s32(scratch + i + 12);
    const int8x16_t output_val = vld1q_s8(output + i);
    const int16x8_t first_half = vmovl_s8(vget_low_s8(output_val));
    const int16x8_t second_half = vmovl_s8(vget_high_s8(output_val));
    const int32x4_t output_val_1 = vmovl_s16(vget_low_s16(first_half));
    const int32x4_t output_val_2 = vmovl_s16(vget_high_s16(first_half));
    const int32x4_t output_val_3 = vmovl_s16(vget_low_s16(second_half));
    const int32x4_t output_val_4 = vmovl_s16(vget_high_s16(second_half));
    // Requantize, then add the prior output and the zero point.
    int32x4x4_t temp_val =
        MultiplyByQuantizedMultiplier4Rows(scratch_val, multiplier, shift);
    temp_val.val[0] =
        vaddq_s32(vaddq_s32(temp_val.val[0], output_val_1), output_zp_dup);
    temp_val.val[1] =
        vaddq_s32(vaddq_s32(temp_val.val[1], output_val_2), output_zp_dup);
    temp_val.val[2] =
        vaddq_s32(vaddq_s32(temp_val.val[2], output_val_3), output_zp_dup);
    temp_val.val[3] =
        vaddq_s32(vaddq_s32(temp_val.val[3], output_val_4), output_zp_dup);
    // Clamp to the int8 range before the two-stage narrowing below.
    temp_val.val[0] =
        vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);
    temp_val.val[1] =
        vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);
    temp_val.val[2] =
        vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup);
    temp_val.val[3] =
        vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup);
    const int16x8_t result_1 =
        vcombine_s16(vqmovn_s32(temp_val.val[0]), vqmovn_s32(temp_val.val[1]));
    const int16x8_t result_2 =
        vcombine_s16(vqmovn_s32(temp_val.val[2]), vqmovn_s32(temp_val.val[3]));
    const int8x16_t result =
        vcombine_s8(vqmovn_s16(result_1), vqmovn_s16(result_2));
    vst1q_s8(output + i, result);
  }
  // Scalar tail: same requantize/add/clamp sequence for the remainder.
  for (; TFLITE_UNLIKELY(i < total_size); ++i) {
    int32_t temp = MultiplyByQuantizedMultiplier(scratch[i], multiplier, shift);
    temp += output_zp;
    temp += output[i];
    if (temp > output_max) {
      temp = output_max;
    }
    if (temp < output_min) {
      temp = output_min;
    }
    output[i] = static_cast<int8_t>(temp);
  }
}
void NeonCpuBackendGemm(const int8_t* input, const int32_t* bias,
const int8_t* input_to_gate_weights, int32_t n_batch,
int32_t n_input, int32_t n_output, int32_t output_zp,
int32_t* scratch, CpuBackendContext* context) {
using ::tflite::cpu_backend_gemm::Gemm;
using ::tflite::cpu_backend_gemm::GemmParams;
using ::tflite::cpu_backend_gemm::MatrixParams;
MatrixParams<int8_t> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.rows = n_output;
lhs_params.cols = n_input;
lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup;
MatrixParams<int8_t> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = n_input;
rhs_params.cols = n_batch;
MatrixParams<int32_t> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = n_output;
dst_params.cols = n_batch;
GemmParams<int32, int32> gemm_params;
if (bias) {
gemm_params.bias = bias;
}
cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input,
dst_params, scratch, gemm_params, context);
}
// int8 weights/input -> int16 output multiply-accumulate: produces raw int32
// accumulators into |scratch| (via the cpu_backend_gemm path when
// TFLITE_WITH_RUY_GEMV is defined, otherwise via the NEON impl above), then
// requantizes them into the int16 |output|.
void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
#ifdef TFLITE_WITH_RUY_GEMV
  NeonCpuBackendGemm(input, bias, input_to_gate_weights, n_batch, n_input,
                     n_output, output_zp, scratch, context);
#else
  NeonMatrixBatchVectorMultiplyImpl(input, bias, input_to_gate_weights, n_batch,
                                    n_input, n_output, output_zp, scratch);
#endif
  NeonMatrixBatchVectorAccumulateImpl(multiplier, shift, n_batch, n_output,
                                      output_zp, scratch, output);
}
// int8-output twin of the function above: same accumulator computation, but
// the requantization pass clamps and narrows into an int8 |output|.
void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
#ifdef TFLITE_WITH_RUY_GEMV
  NeonCpuBackendGemm(input, bias, input_to_gate_weights, n_batch, n_input,
                     n_output, output_zp, scratch, context);
#else
  NeonMatrixBatchVectorMultiplyImpl(input, bias, input_to_gate_weights, n_batch,
                                    n_input, n_output, output_zp, scratch);
#endif
  NeonMatrixBatchVectorAccumulateImpl(multiplier, shift, n_batch, n_output,
                                      output_zp, scratch, output);
}
// Hybrid multiply-accumulate:
//   result[batch * m_rows + row] +=
//       scaling_factors[batch] * dot(matrix row, vectors[batch]).
// On Aarch64 with sdot support, suitably shaped problems are routed to the
// specialized dotprod kernels; otherwise a generic NEON path consumes 16
// (then 8) int8 values per iteration with a scalar postamble.
void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                             const int m_rows, const int m_cols,
                                             const int8_t* __restrict__ vectors,
                                             const float* scaling_factors,
                                             int n_batch,
                                             float* __restrict__ result) {
#ifdef __aarch64__
  if (HasSdotInstruction() && m_cols % 16 == 0 && m_rows % 2 == 0 &&
      m_rows >= n_batch) {
    if (n_batch % 4 == 0) {
      // Benchmarks suggest that it's always better to use the batch code
      // when we can, even on small matrices.
      DotprodMatrixBatchFourVectorMultiplyAccumulate(
          matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);
      return;
    } else if (n_batch >= 2 && m_rows * m_cols >= 128 * 128) {
      // Batch count not a multiple of 4: pad up to one and reuse the
      // four-batch kernel (only worthwhile for large matrices).
      DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate(
          matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);
      return;
    }
  }
#endif  // __aarch64__
  // Assuming *matrix is kNeonVectorAlignment-byte aligned, every row of the
  // matrix is also kNeonVectorAlignment-byte aligned as long as cols is a
  // multiple of kNeonVectorAlignment. The assumption is currently satisfied by
  // TFLite's 16-byte memory alignment scheme.
  //
  // Otherwise, we allocate an aligned memory block and set
  // a flag to later copy rows from matrix to the block
  // for aligned multiplication.
  bool unaligned = false;
  int8_t* aligned_row = nullptr;
  void* aligned_row_free = nullptr;
  if ((m_cols & (kNeonVectorAlignment - 1)) != 0) {
    unaligned = true;
    aligned_row =
        (int8_t*)aligned_alloc(kNeonVectorAlignment, m_cols,  // NOLINT
                               &aligned_row_free);
  }
  void* aligned_vec_free = nullptr;
  int8_t* aligned_vec =
      (int8_t*)aligned_alloc(kNeonVectorAlignment, m_cols,  // NOLINT
                             &aligned_vec_free);
  // If m_cols is not at least kInt8ValuesPerNeonVector, we cannot use the main
  // vectorized loop, and we need to process sequentially. postamble_half_start
  // shows the start index where this should happen. Between postamble_start and
  // postamble_half_start we can still process kInt8ValuesPerNeonVector/2 in a
  // vectorized form.
  const int postamble_half_start =
      RoundDownVectors<kInt8ValuesPerNeonVector>(m_cols);
  const int postamble_start =
      RoundDownVectors<(kInt8ValuesPerNeonVector / 2)>(m_cols);
  for (int batch = 0; batch < n_batch; ++batch) {
    const float batch_scaling_factor = scaling_factors[batch];
    // Copy the vector data to an aligned vector.
    memcpy(aligned_vec, vectors + batch * m_cols, sizeof(int8_t) * m_cols);
    // Compute dot-product for every column.
    for (int row = 0; row < m_rows; ++row) {
      // Get the address of the first element of the row.
      int8_t* row_ptr = (int8_t*)matrix + row * m_cols;  // NOLINT
      if (unaligned) {
        memcpy(aligned_row, row_ptr, sizeof(int8_t) * m_cols);
        row_ptr = aligned_row;
      }
      // Initialize the dot product sum for the row to 0.
      int32x4_t dotprod_32x4 = vmovq_n_s32(0);
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
      // For every block of 16 8-bit elements.
      int col = 0;
      for (; col < postamble_half_start; col += kInt8ValuesPerNeonVector) {
        // Load 16 8-bit values from the row and vector, each, to operate on.
        // Here the assumption is that each buffer is 4-byte aligned. Otherwise,
        // performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x16_t s1_8x16 = vld1q_s8((const int8_t*)(aligned_vec + col));
        const int8x16_t s2_8x16 = vld1q_s8((const int8_t*)(row_ptr + col));
        // Multiply the low bits (i.e. the lower 8 8bit numbers in the
        // registers).
        int16x8_t prod_16x8 =
            vmull_s8(vget_low_s8(s1_8x16), vget_low_s8(s2_8x16));
        // Multiply the high bits (i.e. the higher 8 8bit numbers in the
        // registers), and accumulate with the result of the low bits product.
        // The assumption here is that overflow will not happen as we quantize
        // our values to be in the range [-127, 127]. As such the sum of the 2
        // products is always strictly smaller than 15-bits (32767 in absolute
        // value).
        prod_16x8 =
            vmlal_s8(prod_16x8, vget_high_s8(s1_8x16), vget_high_s8(s2_8x16));
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
      }  // for col
      // Half iteration dealing only 8 elements
      if (TFLITE_UNLIKELY(col < postamble_start)) {
        // Load 8 8-bit values from the row and column each to operate on.
        // Here the assumption is that each buffer is 4-bytes aligned.
        // Otherwise, performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x8_t s1_8x8 = vld1_s8((const int8_t*)(aligned_vec + col));
        const int8x8_t s2_8x8 = vld1_s8((const int8_t*)(row_ptr + col));
        const int16x8_t prod_16x8 = vmull_s8(s1_8x8, s2_8x8);
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
        col += (kInt8ValuesPerNeonVector >> 1);
      }
      // Add the 4 intermediate sum values to get the final dot-prod value for
      // this row.
      int32_t dotprod = AccumulateNeonLane(dotprod_32x4);
      // Postamble loop.
      for (; TFLITE_UNLIKELY(col < m_cols); ++col) {
        dotprod += row_ptr[col] * aligned_vec[col];
      }  // for col
      // Scale by the batch scaling factor and accumulate into the output,
      // which is walked linearly (batch-major, then row).
      *result += dotprod * batch_scaling_factor;
      ++result;
    }  // for row
  }  // for batch
  if (unaligned) {
    free(aligned_row_free);
  }
  free(aligned_vec_free);
}
// Scratch/context variant: when m_rows is a multiple of 4, raw int32
// accumulators are produced into |scratch| through the cpu_backend_gemm
// path and then scaled by the per-batch scaling factors into the float
// |result| (8 values per NEON iteration, scalar tail). Otherwise this falls
// back to the pure NEON implementation above.
void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                             const int m_rows, const int m_cols,
                                             const int8_t* __restrict__ vectors,
                                             const float* scaling_factors,
                                             int n_batch, int32_t* scratch,
                                             float* __restrict__ result,
                                             CpuBackendContext* context) {
  if (m_rows % 4 == 0) {
    const int32_t* bias = static_cast<const int32_t*>(nullptr);
    NeonCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows,
                       /*output_zp =*/0, scratch, context);
    // Multiply by float scaling factors and write to result
    const int total_size = n_batch * m_rows;
    int i = 0;
    for (; i <= total_size - 8; i += 8, result += 8) {
      // i / m_rows and (i + 4) / m_rows select the batch each 4-wide chunk
      // belongs to; m_rows % 4 == 0 keeps chunks from straddling batches.
      const float batch_scaling_factor0 = scaling_factors[i / m_rows];
      const float batch_scaling_factor1 = scaling_factors[(i + 4) / m_rows];
      const float32x4_t scaling_factor0 = vdupq_n_f32(batch_scaling_factor0);
      const float32x4_t scaling_factor1 = vdupq_n_f32(batch_scaling_factor1);
      const int32x4_t scratch_val0 = vld1q_s32(scratch + i);
      const int32x4_t scratch_val1 = vld1q_s32(scratch + i + 4);
      const float32x4_t float_val0 = vcvtq_f32_s32(scratch_val0);
      const float32x4_t float_val1 = vcvtq_f32_s32(scratch_val1);
      // Accumulate scaled values onto the existing result.
      const float32x4_t result0 =
          vmlaq_f32(vld1q_f32(result), float_val0, scaling_factor0);
      const float32x4_t result1 =
          vmlaq_f32(vld1q_f32(result + 4), float_val1, scaling_factor1);
      vst1q_f32(result, result0);
      vst1q_f32(result + 4, result1);
    }
    // Scalar tail for the remaining accumulators.
    scratch += i;
    for (; TFLITE_UNLIKELY(i < total_size); i++) {
      const float batch_scaling_factor = scaling_factors[i / m_rows];
      int32_t x = *(scratch++);
      *result += x * batch_scaling_factor;
      ++result;
    }
    return;
  }
  NeonMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,
                                          scaling_factors, n_batch, result);
}
// Computes output[i] += scalar * sum(row i of matrix) for every row. The
// row sum is vectorized 16 int8 values at a time using widening adds so the
// accumulation stays exact; leftover columns are summed in scalar code.
void NeonMatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
                                        int32_t n_row, int32_t n_col,
                                        int32_t* output) {
  // Processing multiple rows at the same time actually makes it slower. :(
  for (int i = 0; i < n_row; ++i) {
    int32x4_t row_sum = vdupq_n_s32(0);
    int j = 0;
    const int8_t* row_ptr = matrix + i * n_col;
    for (; j <= n_col - kInt8ValuesPerNeonVector;
         j += kInt8ValuesPerNeonVector) {
      // Widen low half to int16, add the high half into it, then pairwise
      // accumulate into the int32 lanes.
      const int8x16_t input_value = vld1q_s8(row_ptr + j);
      int16x8_t temp = vmovl_s8(vget_low_s8(input_value));
      temp = vaddw_s8(temp, vget_high_s8(input_value));
      row_sum = vpadalq_s16(row_sum, temp);
    }
    int32_t sum = AccumulateNeonLane(row_sum);
    // Scalar tail for columns not covered by the vector loop.
    for (; TFLITE_UNLIKELY(j < n_col); ++j) {
      sum += *(row_ptr + j);
    }
    output[i] += sum * scalar;
  }
}
// Per-channel / asymmetric-input variant of the NEON multiply-accumulate:
// each raw dot product is corrected by row_sums[row] * input_offset[batch]
// and scaled by scaling_factors[batch] (times per_channel_scale[row] when
// provided) before being accumulated into |result|. Row sums are computed
// locally when |row_sums| is null. On Aarch64 with sdot, suitably shaped
// problems are routed to the dotprod kernels.
void NeonMatrixBatchVectorMultiplyAccumulateImpl(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* row_sums) {
#ifdef __aarch64__
  if (HasSdotInstruction() && m_cols % 16 == 0 && m_rows % 2 == 0 &&
      m_rows >= n_batch) {
    if (n_batch % 4 == 0) {
      DotprodMatrixBatchFourVectorMultiplyAccumulate(
          matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
          per_channel_scale, input_offset, row_sums);
      return;
    } else if (n_batch >= 2 && m_rows * m_cols >= 128 * 128) {
      // Pad the batch count up to a multiple of 4 and reuse the four-batch
      // kernel (only worthwhile for large matrices).
      DotprodMatrixBatchPaddedFourVectorMultiplyAccumulate(
          matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
          per_channel_scale, input_offset, row_sums);
      return;
    }
  }
#endif  // __aarch64__
  // Same alignment strategy as the symmetric path above: copy rows/vectors
  // into kNeonVectorAlignment-aligned scratch buffers when needed.
  bool unaligned = false;
  int8_t* aligned_row = nullptr;
  void* aligned_row_free = nullptr;
  if ((m_cols & (kNeonVectorAlignment - 1)) != 0) {
    unaligned = true;
    aligned_row =
        (int8_t*)aligned_alloc(kNeonVectorAlignment, m_cols,  // NOLINT
                               &aligned_row_free);
  }
  void* aligned_vec_free = nullptr;
  int8_t* aligned_vec =
      (int8_t*)aligned_alloc(kNeonVectorAlignment, m_cols,  // NOLINT
                             &aligned_vec_free);
  const int postamble_half_start =
      RoundDownVectors<kInt8ValuesPerNeonVector>(m_cols);
  const int postamble_start =
      RoundDownVectors<(kInt8ValuesPerNeonVector / 2)>(m_cols);
  // Compute row sums on the fly when the caller has not cached them.
  int32_t* row_sums_ptr = row_sums;
  if (row_sums == nullptr) {
    row_sums_ptr = static_cast<int32_t*>(malloc(sizeof(int32_t) * m_rows));
    NeonReductionSumVector(matrix, row_sums_ptr, m_rows, m_cols);
  }
  for (int batch = 0; batch < n_batch; ++batch) {
    const float batch_scaling_factor = scaling_factors[batch];
    const int batch_input_offset = input_offset[batch];
    memcpy(aligned_vec, vectors + batch * m_cols, sizeof(int8_t) * m_cols);
    for (int row = 0; row < m_rows; ++row) {
      int8_t* row_ptr = (int8_t*)matrix + row * m_cols;  // NOLINT
      if (unaligned) {
        memcpy(aligned_row, row_ptr, sizeof(int8_t) * m_cols);
        row_ptr = aligned_row;
      }
      // Effective scale: per-batch scaling factor, optionally refined per
      // output channel.
      float scale = batch_scaling_factor;
      if (per_channel_scale) {
        scale *= per_channel_scale[row];
      }
      // Initialize the dot product sum for the row to 0.
      int32x4_t dotprod_32x4 = vmovq_n_s32(0);
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
      // For every block of 16 8-bit elements.
      int col = 0;
      for (; col < postamble_half_start; col += kInt8ValuesPerNeonVector) {
        // Load 16 8-bit values from the row and vector, each, to operate on.
        // Here the assumption is that each buffer is 4-byte aligned. Otherwise,
        // performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x16_t s1_8x16 = vld1q_s8((const int8_t*)(aligned_vec + col));
        const int8x16_t s2_8x16 = vld1q_s8((const int8_t*)(row_ptr + col));
        // Multiply the low bits (i.e. the lower 8 8bit numbers in the
        // registers).
        int16x8_t prod_16x8 =
            vmull_s8(vget_low_s8(s1_8x16), vget_low_s8(s2_8x16));
        // Multiply the high bits (i.e. the higher 8 8bit numbers in the
        // registers), and accumulate with the result of the low bits product.
        // The assumption here is that overflow will not happen as we quantize
        // our values to be in the range [-127, 127]. As such the sum of the 2
        // products is always strictly smaller than 15-bits (32767 in absolute
        // value).
        prod_16x8 =
            vmlal_s8(prod_16x8, vget_high_s8(s1_8x16), vget_high_s8(s2_8x16));
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
      }  // for col
      // Half iteration dealing only 8 elements
      if (TFLITE_UNLIKELY(col < postamble_start)) {
        // Load 8 8-bit values from the row and column each to operate on.
        // Here the assumption is that each buffer is 4-bytes aligned.
        // Otherwise, performance may suffer significantly.
        TFLITE_DCHECK_EQ(  // NOLINT
            (uintptr_t)(&row_ptr[col]) & (kNeonVectorAlignment - 1), 0);
        const int8x8_t s1_8x8 = vld1_s8((const int8_t*)(aligned_vec + col));
        const int8x8_t s2_8x8 = vld1_s8((const int8_t*)(row_ptr + col));
        const int16x8_t prod_16x8 = vmull_s8(s1_8x8, s2_8x8);
        dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
        col += (kInt8ValuesPerNeonVector >> 1);
      }
      int32_t dotprod = AccumulateNeonLane(dotprod_32x4);
      // Postamble loop.
      for (; TFLITE_UNLIKELY(col < m_cols); ++col) {
        dotprod += row_ptr[col] * aligned_vec[col];
      }  // for col
      // Remove the contribution of the input zero point (offset * row sum),
      // then scale and accumulate into the output.
      dotprod -= row_sums_ptr[row] * batch_input_offset;
      *result += dotprod * scale;
      ++result;
    }  // for row
  }  // for batch
  if (row_sums == nullptr) {
    free(row_sums_ptr);
  }
  if (unaligned) {
    free(aligned_row_free);
  }
  free(aligned_vec_free);
}
// Top-level dispatch for the hybrid (int8 weights, float result)
// matrix-batch-vector multiply-accumulate. Chooses between the
// cpu_backend_gemm path and the pure NEON kernels, handles the symmetric
// (input_offset == nullptr) case, and caches row sums across calls via
// |compute_row_sums|.
void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
#ifdef TFLITE_WITH_RUY_GEMV
  const bool use_cpu_backend_gemm = true;
#else
  const bool use_cpu_backend_gemm = UseCpuBackendGemm(m_rows, m_cols, n_batch);
#endif
  if (input_offset == nullptr) {
    // Symmetric input quantization: no zero-point correction needed, so the
    // simpler kernels apply.
    if (use_cpu_backend_gemm && context) {
      NeonMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,
                                              scaling_factors, n_batch, scratch,
                                              result, context);
      return;
    }
    NeonMatrixBatchVectorMultiplyAccumulate(matrix, m_rows, m_cols, vectors,
                                            scaling_factors, n_batch, result);
    return;
  }
  // (Re)compute the cached row sums when the caller asks for it or provides
  // no cache flag.
  if (compute_row_sums == nullptr || *compute_row_sums) {
    NeonReductionSumVector(matrix, row_sums, m_rows, m_cols);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }
  if (use_cpu_backend_gemm) {
    if (context != nullptr && m_rows % 4 == 0) {
      const int32_t* bias = static_cast<const int32_t*>(nullptr);
      NeonCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows, 0,
                         scratch, context);
      // Multiply by float scaling factors and write to result
      const int total_size = n_batch * m_rows;
      int i = 0;
      int32_t* scratch_ptr = scratch;
      for (; i <= total_size - 8; i += 8, result += 8) {
        // i / m_rows identifies the batch; m_rows % 4 == 0 guarantees the
        // 4-wide loads below never straddle a batch boundary or run past
        // the per-row arrays.
        const float batch_scaling_factor0 = scaling_factors[i / m_rows];
        const float batch_scaling_factor1 = scaling_factors[(i + 4) / m_rows];
        const int batch_input_offset0 = -input_offset[i / m_rows];
        const int batch_input_offset1 = -input_offset[(i + 4) / m_rows];
        float32x4_t scaling_factor0 = vdupq_n_f32(batch_scaling_factor0);
        float32x4_t scaling_factor1 = vdupq_n_f32(batch_scaling_factor1);
        if (per_channel_scale) {
          // Fold the per-output-channel scales into the batch scales.
          const float32x4_t per_channel_scale0 =
              vld1q_f32(&per_channel_scale[i % m_rows]);
          const float32x4_t per_channel_scale1 =
              vld1q_f32(&per_channel_scale[(i + 4) % m_rows]);
          scaling_factor0 = vmulq_f32(scaling_factor0, per_channel_scale0);
          scaling_factor1 = vmulq_f32(scaling_factor1, per_channel_scale1);
        }
        const int32x4_t input_offset0 = vdupq_n_s32(batch_input_offset0);
        const int32x4_t input_offset1 = vdupq_n_s32(batch_input_offset1);
        const int32x4_t row_sum0 = vld1q_s32(row_sums + (i % m_rows));
        const int32x4_t row_sum1 = vld1q_s32(row_sums + ((i + 4) % m_rows));
        const int32x4_t scratch_val0 = vld1q_s32(scratch_ptr + i);
        const int32x4_t scratch_val1 = vld1q_s32(scratch_ptr + i + 4);
        // Apply the zero-point correction: accumulator - row_sum * offset
        // (offset was negated above so a multiply-add can be used).
        const int32x4_t dotprod0 =
            vmlaq_s32(scratch_val0, row_sum0, input_offset0);
        const int32x4_t dotprod1 =
            vmlaq_s32(scratch_val1, row_sum1, input_offset1);
        const float32x4_t float_val0 = vcvtq_f32_s32(dotprod0);
        const float32x4_t float_val1 = vcvtq_f32_s32(dotprod1);
        const float32x4_t result0 =
            vmlaq_f32(vld1q_f32(result), float_val0, scaling_factor0);
        const float32x4_t result1 =
            vmlaq_f32(vld1q_f32(result + 4), float_val1, scaling_factor1);
        vst1q_f32(result, result0);
        vst1q_f32(result + 4, result1);
      }
      // Scalar tail: same correction, scaling and accumulation.
      scratch_ptr += i;
      for (; TFLITE_UNLIKELY(i < total_size); i++) {
        float batch_scaling_factor = scaling_factors[i / m_rows];
        if (per_channel_scale) {
          batch_scaling_factor *= per_channel_scale[i % m_rows];
        }
        const int32_t zero_point = input_offset[i / m_rows];
        int32_t dotprod = *(scratch_ptr++);
        dotprod -= row_sums[i % m_rows] * zero_point;
        *result += dotprod * batch_scaling_factor;
        ++result;
      }
      return;
    }
  }
  // Fallback: pure NEON per-channel implementation.
  NeonMatrixBatchVectorMultiplyAccumulateImpl(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      per_channel_scale, input_offset, row_sums);
}
// Widens the int32 lanes of |acc|, |lhs| and |rhs| to int64 and returns
// acc + lhs * rhs as two int64x2 halves (low lanes in val[0], high lanes in
// val[1]). The lane products are extracted and multiplied in scalar code;
// only the widening and final additions use vector instructions.
inline int64x2x2_t MulAdd(int32x4_t acc, int32x4_t lhs, int32x4_t rhs) {
  int64x2x2_t result;
  // Widen lhs to four int64 scalars.
  const int64x2_t lhs_low = vmovl_s32(vget_low_s32(lhs));
  const int64x2_t lhs_high = vmovl_s32(vget_high_s32(lhs));
  const int64_t lhs_0 = vgetq_lane_s64(lhs_low, 0);
  const int64_t lhs_1 = vgetq_lane_s64(lhs_low, 1);
  const int64_t lhs_2 = vgetq_lane_s64(lhs_high, 0);
  const int64_t lhs_3 = vgetq_lane_s64(lhs_high, 1);
  // Widen rhs the same way.
  const int64x2_t rhs_low = vmovl_s32(vget_low_s32(rhs));
  const int64x2_t rhs_high = vmovl_s32(vget_high_s32(rhs));
  const int64_t rhs_0 = vgetq_lane_s64(rhs_low, 0);
  const int64_t rhs_1 = vgetq_lane_s64(rhs_low, 1);
  const int64_t rhs_2 = vgetq_lane_s64(rhs_high, 0);
  const int64_t rhs_3 = vgetq_lane_s64(rhs_high, 1);
  // Lane-wise 64-bit products, repacked into vectors for the final add.
  const int64x2_t mul_0 = {lhs_0 * rhs_0, lhs_1 * rhs_1};
  const int64x2_t mul_1 = {lhs_2 * rhs_2, lhs_3 * rhs_3};
  result.val[0] = vaddq_s64(vmovl_s32(vget_low_s32(acc)), mul_0);
  result.val[1] = vaddq_s64(vmovl_s32(vget_high_s32(acc)), mul_1);
  return result;
}
// Quantized (int16) layer normalization.
// For each of the `n_batch` rows of `n_input` elements:
//   1. Accumulates sum and sum-of-squares (NEON, 8 int16 lanes widened to
//      int32 per step), with a scalar tail.
//   2. Computes the mean scaled by 1024 (Q10) and the variance.
//   3. Converts 1/sqrt(variance) into a quantized multiplier via
//      GetInvSqrtQuantizedMultiplierExp; `variance_limit` substitutes for
//      degenerate (< 1) variance values.
//   4. Normalizes each element, applies per-element `layer_norm_weights` and
//      `bias` (64-bit accumulation via MulAdd), rescales by
//      layer_norm_scale_a/b and saturate-narrows to int16.
// The normalization loop handles 16 elements per iteration; a scalar loop
// finishes the remainder.
void NeonApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights,
                        const int32_t* bias, int32_t layer_norm_scale_a,
                        int32_t layer_norm_scale_b, int32_t variance_limit,
                        int n_batch, int n_input, int16_t* output) {
  const int32 int16_max = std::numeric_limits<int16>::max();
  const int32 int16_min = std::numeric_limits<int16>::min();
  // 2^20 / n_input, used below to fold the 1/n_input factor into the
  // variance computation.
  const int32 temp = 1048576 / n_input;
  for (int i = 0; i < n_batch; ++i) {
    int64_t sum = 0;
    int64_t sum_sq = 0;
    int j = 0;
    // Vectorized accumulation of sum and sum-of-squares, 8 elements at a
    // time.
    for (; j <= n_input - 8; j += 8) {
      const int32 index = i * n_input + j;
      const int16x8_t val_s16 = vld1q_s16(input + index);
      const int32x4_t val_s32_0 = vmovl_s16(vget_low_s16(val_s16));
      const int32x4_t val_s32_1 = vmovl_s16(vget_high_s16(val_s16));
      sum += static_cast<int64_t>(AccumulateNeonLane(val_s32_0));
      sum += static_cast<int64_t>(AccumulateNeonLane(val_s32_1));
      sum_sq += static_cast<int64_t>(
          AccumulateNeonLane(vmulq_s32(val_s32_0, val_s32_0)));
      sum_sq += static_cast<int64_t>(
          AccumulateNeonLane(vmulq_s32(val_s32_1, val_s32_1)));
    }
    // Scalar tail of the accumulation.
    for (; TFLITE_UNLIKELY(j < n_input); ++j) {
      const int32 index = i * n_input + j;
      int32 val = static_cast<int32_t>(input[index]);
      sum += val;
      sum_sq += val * val;
    }
    // Mean in Q10 (scaled by 1024).
    int32_t mean =
        static_cast<int32_t>(static_cast<int64_t>(sum) * 1024 / n_input);
    // TODO(jianlijianli): Avoids overflow but only works for POT n_input.
    int64_t variance =
        sum_sq * temp - static_cast<int64_t>(mean) * static_cast<int64_t>(mean);
    // Undo the 2^20 scaling introduced by `temp` and the Q10 mean product.
    int32_t variance2 = static_cast<int32>(variance / 1048576);
    if (variance2 < 1) {
      // Guard against zero/negative variance; use the caller-supplied limit.
      variance2 = variance_limit;
    }
    int32_t stddev_inverse_a;
    int stddev_inverse_b;
    // Quantized multiplier/exponent approximating 1 / sqrt(variance2).
    GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1,
                                     &stddev_inverse_a, &stddev_inverse_b);
    j = 0;
    const int32x4_t mean_dup = vdupq_n_s32(mean);
    for (; j <= n_input - 16; j += 16) {
      // Load 16 items at once.
      const int32 index = i * n_input + j;
      const int16x8_t val_s16_0 = vld1q_s16(input + index);
      const int16x8_t val_s16_1 = vld1q_s16(input + index + 8);
      // Center: shift each input into Q10 and subtract the Q10 mean.
      int32x4x4_t shifted;
      shifted.val[0] = vsubq_s32(
          vshlq_n_s32(vmovl_s16(vget_low_s16(val_s16_0)), 10), mean_dup);
      shifted.val[1] = vsubq_s32(
          vshlq_n_s32(vmovl_s16(vget_high_s16(val_s16_0)), 10), mean_dup);
      shifted.val[2] = vsubq_s32(
          vshlq_n_s32(vmovl_s16(vget_low_s16(val_s16_1)), 10), mean_dup);
      shifted.val[3] = vsubq_s32(
          vshlq_n_s32(vmovl_s16(vget_high_s16(val_s16_1)), 10), mean_dup);
      // Scale by the inverse standard deviation.
      int32x4x4_t rescaled = MultiplyByQuantizedMultiplier4Rows(
          shifted, stddev_inverse_a, stddev_inverse_b);
      const int32x4_t bias_0 = vld1q_s32(bias + j);
      const int32x4_t bias_1 = vld1q_s32(bias + j + 4);
      const int32x4_t bias_2 = vld1q_s32(bias + j + 8);
      const int32x4_t bias_3 = vld1q_s32(bias + j + 12);
      const int16x8_t layer_norm_weights_s16_0 =
          vld1q_s16(layer_norm_weights + j);
      const int16x8_t layer_norm_weights_s16_1 =
          vld1q_s16(layer_norm_weights + j + 8);
      const int32x4_t layer_norm_weights_s32_0 =
          vmovl_s16(vget_low_s16(layer_norm_weights_s16_0));
      const int32x4_t layer_norm_weights_s32_1 =
          vmovl_s16(vget_high_s16(layer_norm_weights_s16_0));
      const int32x4_t layer_norm_weights_s32_2 =
          vmovl_s16(vget_low_s16(layer_norm_weights_s16_1));
      const int32x4_t layer_norm_weights_s32_3 =
          vmovl_s16(vget_high_s16(layer_norm_weights_s16_1));
      // val3 = bias + rescaled * layer_norm_weights, in 64-bit lanes.
      int64x2x2_t val3_0 =
          MulAdd(bias_0, rescaled.val[0], layer_norm_weights_s32_0);
      int64x2x2_t val3_1 =
          MulAdd(bias_1, rescaled.val[1], layer_norm_weights_s32_1);
      int64x2x2_t val3_2 =
          MulAdd(bias_2, rescaled.val[2], layer_norm_weights_s32_2);
      int64x2x2_t val3_3 =
          MulAdd(bias_3, rescaled.val[3], layer_norm_weights_s32_3);
      // Rounding shift right by 10 and narrow back to 32-bit lanes.
      int32x4x4_t val4;
      val4.val[0] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_0.val[0], 10)),
                                 vmovn_s64(vrshrq_n_s64(val3_0.val[1], 10)));
      val4.val[1] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_1.val[0], 10)),
                                 vmovn_s64(vrshrq_n_s64(val3_1.val[1], 10)));
      val4.val[2] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_2.val[0], 10)),
                                 vmovn_s64(vrshrq_n_s64(val3_2.val[1], 10)));
      val4.val[3] = vcombine_s32(vmovn_s64(vrshrq_n_s64(val3_3.val[0], 10)),
                                 vmovn_s64(vrshrq_n_s64(val3_3.val[1], 10)));
      // Final output rescale, then saturate-narrow to int16 and store.
      int32x4x4_t val5_s32 = MultiplyByQuantizedMultiplier4Rows(
          val4, layer_norm_scale_a, layer_norm_scale_b + 12);
      vst1_s16(output + index, vqmovn_s32(val5_s32.val[0]));
      vst1_s16(output + index + 4, vqmovn_s32(val5_s32.val[1]));
      vst1_s16(output + index + 8, vqmovn_s32(val5_s32.val[2]));
      vst1_s16(output + index + 12, vqmovn_s32(val5_s32.val[3]));
    }
    // Scalar tail, mirroring the vector path step by step.
    for (; TFLITE_UNLIKELY(j < n_input); ++j) {
      const int32 index = i * n_input + j;
      int32 val = static_cast<int32_t>(input[index]);
      int32 shifted = 1024 * val - mean;
      int32 rescaled = MultiplyByQuantizedMultiplier(shifted, stddev_inverse_a,
                                                     stddev_inverse_b);
      // TODO(jianlijianli): Saturate this.
      int64_t val3 = rescaled * layer_norm_weights[j] + bias[j];
      // Round-half-away-from-zero divide by 1024 (matches vrshrq_n_s64 .. 10
      // for the values produced here).
      int32 val4 =
          static_cast<int32>((val3 > 0 ? val3 + 512 : val3 - 512) / 1024);
      int32 val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a,
                                                 layer_norm_scale_b + 12);
      val5 = std::min(std::max(int16_min, val5), int16_max);
      output[index] = static_cast<int16_t>(val5);
    }
  }
}
// Applies the logistic (sigmoid) function to quantized int16 input using
// gemmlowp fixed-point math. The input is interpreted as Q3.12 (3 integer
// bits, range [-8, 8]) and the output as Q0.15 (range [-1, 1]).
// When GEMMLOWP_NEON is available, 32 elements are processed per iteration
// with int16x8 SIMD fixed-point types; the scalar gemmlowp path handles the
// remainder (and everything when SIMD fixed-point is unavailable).
void NeonApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
                      int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    int i = 0;
#ifdef GEMMLOWP_NEON
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<int16x8_t, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<int16x8_t, 3>;
    // SIMD path: four int16x8 registers (32 values) per iteration.
    for (; i <= n_input - 32; i += 32) {
      const int index = batch * n_input + i;
      F3 input0 = F3::FromRaw(vld1q_s16(input + index));
      F3 input1 = F3::FromRaw(vld1q_s16(input + index + 8));
      F3 input2 = F3::FromRaw(vld1q_s16(input + index + 16));
      F3 input3 = F3::FromRaw(vld1q_s16(input + index + 24));
      F0 output0 = gemmlowp::logistic(input0);
      F0 output1 = gemmlowp::logistic(input1);
      F0 output2 = gemmlowp::logistic(input2);
      F0 output3 = gemmlowp::logistic(input3);
      vst1q_s16(output + index, output0.raw());
      vst1q_s16(output + index + 8, output1.raw());
      vst1q_s16(output + index + 16, output2.raw());
      vst1q_s16(output + index + 24, output3.raw());
    }
#endif  // GEMMLOWP_NEON
    // Scalar fixed-point fallback/tail (same Q3.12 -> Q0.15 contract).
    using F0_Scalar = gemmlowp::FixedPoint<int16_t, 0>;
    using F3_Scalar = gemmlowp::FixedPoint<int16_t, 3>;
    for (; i < n_input; ++i) {
      const int index = batch * n_input + i;
      F3_Scalar input_f3 = F3_Scalar::FromRaw(input[index]);
      F0_Scalar output_f0 = gemmlowp::logistic(input_f3);
      output[index] = output_f0.raw();
    }
  }
}
// Applies tanh to quantized int16 input whose fixed-point format has
// `IntegerBits` integer bits (Q<IntegerBits>.<15-IntegerBits>); output is
// Q0.15 in [-1, 1]. Uses gemmlowp SIMD fixed-point types when GEMMLOWP_NEON
// is defined (32 elements per iteration); otherwise / for the tail it falls
// back to gemmlowp's scalar fixed-point tanh.
template <int IntegerBits>
void NeonApplyTanhImpl(const int16_t* input, int32_t n_batch, int32_t n_input,
                       int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    int i = 0;
#ifdef GEMMLOWP_NEON
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F_In = gemmlowp::FixedPoint<int16x8_t, IntegerBits>;
    using F_Out = gemmlowp::FixedPoint<int16x8_t, 0>;
    // SIMD path: four int16x8 registers (32 values) per iteration.
    for (; i <= n_input - 32; i += 32) {
      const int index = batch * n_input + i;
      F_In input0 = F_In::FromRaw(vld1q_s16(input + index));
      F_In input1 = F_In::FromRaw(vld1q_s16(input + index + 8));
      F_In input2 = F_In::FromRaw(vld1q_s16(input + index + 16));
      F_In input3 = F_In::FromRaw(vld1q_s16(input + index + 24));
      F_Out output0 = gemmlowp::tanh(input0);
      F_Out output1 = gemmlowp::tanh(input1);
      F_Out output2 = gemmlowp::tanh(input2);
      F_Out output3 = gemmlowp::tanh(input3);
      vst1q_s16(output + index, output0.raw());
      vst1q_s16(output + index + 8, output1.raw());
      vst1q_s16(output + index + 16, output2.raw());
      vst1q_s16(output + index + 24, output3.raw());
    }
#endif  // GEMMLOWP_NEON
    // Scalar fixed-point fallback/tail.
    using F_In_Scalar = gemmlowp::FixedPoint<int16_t, IntegerBits>;
    using F_Out_Scalar = gemmlowp::FixedPoint<int16_t, 0>;
    for (; i < n_input; ++i) {
      const int index = batch * n_input + i;
      F_In_Scalar input_in = F_In_Scalar::FromRaw(input[index]);
      F_Out_Scalar output_out = gemmlowp::tanh(input_in);
      output[index] = output_out.raw();
    }
  }
}
// Runtime dispatcher for NeonApplyTanhImpl: selects the template
// specialization matching the input's number of integer bits (0..6).
// Unsupported values are a no-op (after the debug assert).
void NeonApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch,
                   int32_t n_input, int16_t* output) {
  assert(integer_bits <= 6);
  switch (integer_bits) {
    case 0:
      NeonApplyTanhImpl<0>(input, n_batch, n_input, output);
      break;
    case 1:
      NeonApplyTanhImpl<1>(input, n_batch, n_input, output);
      break;
    case 2:
      NeonApplyTanhImpl<2>(input, n_batch, n_input, output);
      break;
    case 3:
      NeonApplyTanhImpl<3>(input, n_batch, n_input, output);
      break;
    case 4:
      NeonApplyTanhImpl<4>(input, n_batch, n_input, output);
      break;
    case 5:
      NeonApplyTanhImpl<5>(input, n_batch, n_input, output);
      break;
    case 6:
      NeonApplyTanhImpl<6>(input, n_batch, n_input, output);
      break;
    default:
      return;
  }
}
// Element-wise product of two int16 tensors of shape [n_batch, n_input].
// Each int32 product is rescaled with a rounding divide by 2^shift before
// being narrowed back to int16.
void NeonCwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch,
                  int n_input, int shift, int16_t* output) {
  for (int batch = 0; batch < n_batch; ++batch) {
    int i = 0;
    // Vector path: widen to int32, multiply, rounding-shift, then narrow.
    // Note vmovn_s32 keeps the low 16 bits without saturation.
    for (; i <= n_input - 8; i += 8) {
      const int index = batch * n_input + i;
      const int16x8_t a = vld1q_s16(input_1 + index);
      const int16x8_t b = vld1q_s16(input_2 + index);
      const int32x4_t a_s32_0 = vmovl_s16(vget_low_s16(a));
      const int32x4_t a_s32_1 = vmovl_s16(vget_high_s16(a));
      const int32x4_t b_s32_0 = vmovl_s16(vget_low_s16(b));
      const int32x4_t b_s32_1 = vmovl_s16(vget_high_s16(b));
      int32x4_t x_0 = vmulq_s32(a_s32_0, b_s32_0);
      int32x4_t x_1 = vmulq_s32(a_s32_1, b_s32_1);
      x_0 = gemmlowp::RoundingDivideByPOT(x_0, shift);
      x_1 = gemmlowp::RoundingDivideByPOT(x_1, shift);
      const int16x8_t result = vcombine_s16(vmovn_s32(x_0), vmovn_s32(x_1));
      vst1q_s16(output + index, result);
    }
    // Scalar tail. The int32-max cap cannot fire for int16 operands
    // (|a * b| <= 2^30) but is kept as a safety net.
    for (; TFLITE_UNLIKELY(i < n_input); ++i) {
      const int index = batch * n_input + i;
      const int16_t a = input_1[index];
      const int16_t b = input_2[index];
      int64_t x = a * b;
      if (x > std::numeric_limits<std::int32_t>::max()) {
        x = std::numeric_limits<std::int32_t>::max();
      }
      const int32_t value = static_cast<int32_t>(x);
      output[index] =
          static_cast<int16_t>(gemmlowp::RoundingDivideByPOT(value, shift));
    }
  }
}
// Element-wise product of two int16 tensors with requantization to int8.
// Each int32 product is scaled by the quantized (multiplier, shift) pair,
// shifted by -output_zp, clamped to [-128, 127], and narrowed to int8.
void NeonCwiseMul(const int16_t* input_1, const int16_t* input_2,
                  int32_t multiplier, int shift, int n_batch, int n_input,
                  int32_t output_zp, int8_t* output) {
  const int32_t output_min = std::numeric_limits<int8_t>::min();
  const int32_t output_max = std::numeric_limits<int8_t>::max();
  // Broadcast -output_zp so the zero point can be applied with a single add.
  const int32x4_t output_zp_dup = vdupq_n_s32(-output_zp);
  const int32x4_t max_val_dup = vdupq_n_s32(output_max);
  const int32x4_t min_val_dup = vdupq_n_s32(output_min);
  for (int batch = 0; batch < n_batch; ++batch) {
    int i = 0;
    // Vector path: 8 elements per iteration.
    for (; i <= n_input - 8; i += 8) {
      const int index = batch * n_input + i;
      const int16x8_t a = vld1q_s16(input_1 + index);
      const int16x8_t b = vld1q_s16(input_2 + index);
      const int32x4_t a_s32_0 = vmovl_s16(vget_low_s16(a));
      const int32x4_t a_s32_1 = vmovl_s16(vget_high_s16(a));
      const int32x4_t b_s32_0 = vmovl_s16(vget_low_s16(b));
      const int32x4_t b_s32_1 = vmovl_s16(vget_high_s16(b));
      int32x4x2_t temp_val;
      temp_val.val[0] = vmulq_s32(a_s32_0, b_s32_0);
      temp_val.val[1] = vmulq_s32(a_s32_1, b_s32_1);
      // Requantize, subtract the output zero point, and clamp to int8 range.
      temp_val =
          MultiplyByQuantizedMultiplier2Rows(temp_val, multiplier, shift);
      temp_val.val[0] = vaddq_s32(temp_val.val[0], output_zp_dup);
      temp_val.val[1] = vaddq_s32(temp_val.val[1], output_zp_dup);
      temp_val.val[0] =
          vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);
      temp_val.val[1] =
          vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);
      const int16x8_t result =
          vcombine_s16(vmovn_s32(temp_val.val[0]), vmovn_s32(temp_val.val[1]));
      vst1_s8(output + index, vmovn_s16(result));
    }
    // Scalar tail, mirroring the vector path.
    for (; TFLITE_UNLIKELY(i < n_input); ++i) {
      const int index = batch * n_input + i;
      const int16_t a = input_1[index];
      const int16_t b = input_2[index];
      int32_t value = static_cast<int32_t>(a) * static_cast<int32_t>(b);
      value = MultiplyByQuantizedMultiplier(value, multiplier, shift);
      value -= output_zp;
      value = std::min(std::max(-128, value), 127);
      output[index] = static_cast<int8>(value);
    }
  }
}
// Element-wise saturating addition of two int16 tensors of shape
// [n_batch, n_input]; sums are clamped to the int16 range.
void NeonCwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch,
                  int n_input, int16_t* output) {
  const int32 int16_max = std::numeric_limits<int16>::max();
  const int32 int16_min = std::numeric_limits<int16>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    int i = 0;
    // Vector path: widen to int32, add, then saturate-narrow (vqmovn) back to
    // int16, 8 elements per iteration.
    for (; i <= n_input - 8; i += 8) {
      const int index = batch * n_input + i;
      const int16x8_t a = vld1q_s16(input_1 + index);
      const int16x8_t b = vld1q_s16(input_2 + index);
      const int32x4_t a_s32_0 = vmovl_s16(vget_low_s16(a));
      const int32x4_t a_s32_1 = vmovl_s16(vget_high_s16(a));
      const int32x4_t b_s32_0 = vmovl_s16(vget_low_s16(b));
      const int32x4_t b_s32_1 = vmovl_s16(vget_high_s16(b));
      const int32x4_t sum_0 = vaddq_s32(a_s32_0, b_s32_0);
      const int32x4_t sum_1 = vaddq_s32(a_s32_1, b_s32_1);
      vst1_s16(output + index, vqmovn_s32(sum_0));
      vst1_s16(output + index + 4, vqmovn_s32(sum_1));
    }
    // Scalar tail with explicit clamping.
    for (; TFLITE_UNLIKELY(i < n_input); ++i) {
      const int index = batch * n_input + i;
      int32_t sum = input_1[index] + input_2[index];
      const int32 sum_clamped = std::min(int16_max, std::max(int16_min, sum));
      output[index] = static_cast<int16_t>(sum_clamped);
    }
  }
}
// Clips each element of `vector` in place to [-clipping_value,
// clipping_value].
void NeonCwiseClipping(float* vector, const int v_size,
                       const float clipping_value) {
  // Broadcast the upper and lower clip bounds.
  const float32x4_t upper_bound = vmovq_n_f32(clipping_value);
  const float32x4_t lower_bound = vmovq_n_f32(-clipping_value);
  int i = 0;
  // Vectorized loop: clamp four floats per iteration.
  for (; i <= v_size - kFloatValuesPerNeonVector;
       i += kFloatValuesPerNeonVector) {
    float32x4_t values = vld1q_f32(vector + i);
    values = vmaxq_f32(lower_bound, vminq_f32(upper_bound, values));
    vst1q_f32(vector + i, values);
  }
  // Scalar tail for any leftover elements.
  for (; TFLITE_UNLIKELY(i < v_size); ++i) {
    vector[i] = std::max(std::min(clipping_value, vector[i]), -clipping_value);
  }
}
// Clips each element of `vector` in place to [-clipping_value,
// clipping_value].
void NeonCwiseClipping(int16_t* vector, const int v_size,
                       const int16_t clipping_value) {
  const int16x8_t upper_bound = vdupq_n_s16(clipping_value);
  const int16x8_t lower_bound = vdupq_n_s16(-clipping_value);
  int i = 0;
  // Process two int16x8 registers (16 values) per iteration, in place.
  for (; i <= v_size - 2 * kInt16ValuesPerNeonVector;
       i += 2 * kInt16ValuesPerNeonVector) {
    int16x8_t first = vld1q_s16(vector + i);
    int16x8_t second = vld1q_s16(vector + i + kInt16ValuesPerNeonVector);
    first = vmaxq_s16(vminq_s16(first, upper_bound), lower_bound);
    second = vmaxq_s16(vminq_s16(second, upper_bound), lower_bound);
    vst1q_s16(vector + i, first);
    vst1q_s16(vector + i + kInt16ValuesPerNeonVector, second);
  }
  // Scalar tail for any leftover elements.
  for (; TFLITE_UNLIKELY(i < v_size); ++i) {
    vector[i] = std::max(std::min(clipping_value, vector[i]),
                         static_cast<int16_t>(-clipping_value));
  }
}
// Clips each element of `vector` in place to [-clipping_value,
// clipping_value].
void NeonCwiseClipping(int8_t* vector, const int v_size,
                       const int8_t clipping_value) {
  const int8x16_t max_dup = vdupq_n_s8(clipping_value);
  const int8x16_t min_dup = vdupq_n_s8(-clipping_value);
  int i = 0;
  // Process two int8x16 registers (32 values) per iteration. Use `<=` so a
  // v_size that is an exact multiple of 32 is fully vectorized, matching the
  // int16_t overload above (the previous `<` condition sent the final 32
  // elements through the scalar tail). Output is identical either way.
  for (; i <= v_size - kInt8ValuesPerNeonVector * 2;
       i += kInt8ValuesPerNeonVector * 2) {
    int8x16_t val_0 = vld1q_s8(vector + i);
    int8x16_t val_1 = vld1q_s8(vector + i + kInt8ValuesPerNeonVector);
    val_0 = vminq_s8(val_0, max_dup);
    val_1 = vminq_s8(val_1, max_dup);
    val_0 = vmaxq_s8(val_0, min_dup);
    val_1 = vmaxq_s8(val_1, min_dup);
    vst1q_s8(vector + i, val_0);
    vst1q_s8(vector + i + kInt8ValuesPerNeonVector, val_1);
  }
  // Scalar tail for any leftover elements.
  for (; TFLITE_UNLIKELY(i < v_size); i++) {
    vector[i] = std::max(std::min(clipping_value, vector[i]),
                         static_cast<int8_t>(-clipping_value));
  }
}
// Block-sparse (1x4 blocks) matrix * batched-vector multiply-accumulate.
// `segments` and `indices` describe the nonzero structure CSR-style:
// entries segments[row] .. segments[row + 1] of `indices` give the positions
// of that row's nonzero 4-float blocks, and `matrix` stores only those
// blocks, packed contiguously in row order. Results are accumulated into
// `result` (shape [n_batch, m_rows]).
void NeonSparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
  constexpr int kBlockSize = kFloatValuesPerNeonVector;
  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    // Rewind to the first stored block for each batch; blocks are consumed
    // in the same order for every batch.
    const float* matrix_ptr = matrix;
    for (int row = 0; row < m_rows; row++) {
      float32x4_t acc_32x4 = vmovq_n_f32(0.0);
      const float* vector_in_batch = vector + batch * m_cols;
      for (int i = segments[row]; i < segments[row + 1]; i++) {
        const int block_start_index = indices[i] * kBlockSize;
        const float* vector_block_in_batch_ptr =
            vector_in_batch + block_start_index;
        // Load 4 float values from the vector and matrix row.
        float32x4_t vector_f32x4 = vld1q_f32(vector_block_in_batch_ptr);
        float32x4_t matrix_f32x4 = vld1q_f32(matrix_ptr);
        // Multiply the vector and matrix row and add to accumulator.
        acc_32x4 = vmlaq_f32(acc_32x4, matrix_f32x4, vector_f32x4);
        matrix_ptr += kBlockSize;
      }
      // Horizontal sum of the four partial accumulators.
      result[batch * m_rows + row] += AccumulateNeonLane(acc_32x4);
    }
  }
}
// Sparse matrix * batched-vector multiply-accumulate using a ledger format:
// for each row, `ledger` stores the count of nonzero 16-float blocks followed
// by each block's index within the row; `matrix` stores only those nonzero
// blocks, packed contiguously. Results are accumulated into `result`.
void NeonSparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result) {
  constexpr int kNeonVectorsPerBlock = 4;
  constexpr int kBlockSize = kNeonVectorsPerBlock * kFloatValuesPerNeonVector;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    // Both the ledger and the packed matrix are walked from the start for
    // every batch.
    const float* matrix_ptr = matrix;
    const uint8_t* ledger_ptr = ledger;
    for (int row = 0; row < m_rows; row++) {
      int num_nonzero_blocks = *ledger_ptr++;
      if (num_nonzero_blocks > 0) {
        float32x4_t acc_32x4 = vmovq_n_f32(0.0);
        const float* vector_in_batch = vector + batch * m_cols;
        for (int i = 0; i < num_nonzero_blocks; i++) {
          const int block_start_index = *ledger_ptr++ * kBlockSize;
          const float* vector_block_in_batch_ptr =
              vector_in_batch + block_start_index;
          // Each block covers 4 NEON vectors (16 floats).
          for (int c = 0; c < kNeonVectorsPerBlock; c++) {
            // Load 4 float values from the vector and matrix row.
            float32x4_t vector_f32x4 = vld1q_f32(vector_block_in_batch_ptr +
                                                 c * kFloatValuesPerNeonVector);
            float32x4_t matrix_f32x4 =
                vld1q_f32(matrix_ptr + c * kFloatValuesPerNeonVector);
            // Multiply the vector and matrix row and add to accumulator.
            acc_32x4 = vmlaq_f32(acc_32x4, matrix_f32x4, vector_f32x4);
          }
          matrix_ptr += kBlockSize;
        }
        // Horizontal sum of the four partial accumulators.
        result[batch * m_rows + row] += AccumulateNeonLane(acc_32x4);
      }
    }
  }
}
// Quantized (int8) sparse matrix * batched-vector multiply-accumulate using
// the same ledger format as the float overload, but with 16-int8 blocks.
// Per-row int32 dot products are scaled by the batch's scaling factor and
// accumulated into the float `result`. On AArch64 with the dot-product
// extension (and 16-divisible m_cols) a specialized sdot path is used
// instead. The input vector is copied into an aligned scratch buffer because
// the SIMD loads assume alignment.
void NeonSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result) {
#ifdef __aarch64__
  if (HasSdotInstruction() && m_cols % 16 == 0) {
    DotprodSparseMatrixBatchVectorMultiplyAccumulate(
        matrix, ledger, m_rows, m_cols, vectors, scaling_factors, n_batch,
        result);
    return;
  }
#endif  // __aarch64__
  constexpr int kBlockSize = kInt8ValuesPerNeonVector;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  // Aligned scratch buffer for one batch's vector (freed at the end).
  void* aligned_vec_free = nullptr;
  int8_t* aligned_vec =
      (int8_t*)aligned_alloc(kNeonVectorAlignment, m_cols,  // NOLINT
                             &aligned_vec_free);
  for (int batch = 0; batch < n_batch; ++batch) {
    const float batch_scaling_factor = scaling_factors[batch];
    // Copy the vector data to an aligned vector.
    memcpy(aligned_vec, vectors + batch * m_cols, sizeof(int8) * m_cols);
    const uint8_t* ledger_ptr = ledger;
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      // Initialize the dot product sum for the row to 0.
      int32x4_t dotprod_32x4 = vmovq_n_s32(0);
      int num_nonzero_blocks = *ledger_ptr++;
      if (num_nonzero_blocks > 0) {
        // Prefetch the row to cache.
        __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                           3 /* temporal locality */);
        for (int i = 0; i < num_nonzero_blocks; i++) {
          const int col_index = *ledger_ptr++ * kBlockSize;
          // Load 16 8-bit values from the row and vector, each, to operate on.
          // Here the assumption is that each buffer is 4-byte aligned.
          // Otherwise, performance may suffer significantly.
          TFLITE_DCHECK_EQ(  // NOLINT
              (uintptr_t)(&row_ptr) & (kNeonVectorAlignment - 1), 0);
          const int8x16_t s1_8x16 =
              vld1q_s8((const int8_t*)(aligned_vec + col_index));
          const int8x16_t s2_8x16 = vld1q_s8((const int8_t*)(row_ptr));
          // Multiply the low bits (i.e. the lower 8 8bit numbers in the
          // registers).
          int16x8_t prod_16x8 =
              vmull_s8(vget_low_s8(s1_8x16), vget_low_s8(s2_8x16));
          // Multiply the high bits (i.e. the lower 8 8bit numbers in the
          // registers), and accumulate with the result of the low bits product.
          // The assumption here is that overflow will not happen as we quantize
          // our values to be in the range [-127, 127]. As such the sum of the 2
          // products is always strictly smaller than 15-bits (32767 in absolute
          // value).
          prod_16x8 =
              vmlal_s8(prod_16x8, vget_high_s8(s1_8x16), vget_high_s8(s2_8x16));
          // Pairwise-add the int16 products into the int32 accumulator.
          dotprod_32x4 = vpadalq_s16(dotprod_32x4, prod_16x8);
          row_ptr += kBlockSize;
        }
        // Add the 4 intermediate sum values to get the final dot-prod value for
        // this row.
        int32_t dotprod = AccumulateNeonLane(dotprod_32x4);
        result[batch * m_rows + row] += dotprod * batch_scaling_factor;
      }
    }  // for row
  }    // for batch
  free(aligned_vec_free);
}
// Computes result[i] = 1.0f - vector[i] for every element.
void NeonSub1Vector(const float* vector, int v_size, float* result) {
  // Number of elements handled by full NEON vectors; the remainder goes
  // through the scalar tail loop below.
  const int vectorized_end =
      RoundDownVectors<kFloatValuesPerNeonVector>(v_size);
  const float32x4_t ones_f32x4 = vmovq_n_f32(1.0);
  int i = 0;
  for (; i < vectorized_end; i += kFloatValuesPerNeonVector) {
    // result[i..i+3] = 1 - vector[i..i+3]
    const float32x4_t input_f32x4 = vld1q_f32(vector + i);
    vst1q_f32(result + i, vsubq_f32(ones_f32x4, input_f32x4));
  }
  for (; TFLITE_UNLIKELY(i < v_size); ++i) {
    result[i] = 1.0f - vector[i];
  }
}
// Computes result[i] = 32767 - vector[i] (wrapping int16 arithmetic), i.e.
// approximately (1.0 - x) for Q0.15 fixed-point inputs.
void NeonSub1Vector(const int16_t* vector, int v_size, int16_t* result) {
  int postamble_start = RoundDownVectors<kInt16ValuesPerNeonVector>(v_size);
  static const int16_t kOne = 32767;
  // Use xor to replace subtraction from (1 << 15) - 1.
  // Local benchmark shows it's slightly faster than pure subtraction.
  // (0x7FFF ^ x equals 0x7FFF - x modulo 2^16: the low 15 bits of 0x7FFF are
  // all ones, so the subtraction never borrows within them.)
  const int16x8_t one_dup = vdupq_n_s16(kOne);
  int i = 0;
  for (; i < postamble_start; i += kInt16ValuesPerNeonVector) {
    const int16x8_t input = vld1q_s16(vector + i);
    const int16x8_t sub1_result = veorq_s16(one_dup, input);
    vst1q_s16(result + i, sub1_result);
  }
  // Scalar tail using the same xor identity.
  for (; TFLITE_UNLIKELY(i < v_size); i++) {
    result[i] = kOne ^ vector[i];
  }
}
namespace {
#ifdef __aarch64__
// Returns true iff every byte of the register is zero. On AArch64 the
// horizontal max of the register viewed as u32 lanes is zero exactly when
// all bits are zero.
inline bool IsAllZero(const int8x16_t v_s8x16) {
  const uint32_t u32 = vmaxvq_u32(vreinterpretq_u32_s8(v_s8x16));
  return !u32;
}
// Returns true iff every float lane compares equal to zero.
inline bool IsAllZero(const float32x4_t v_f32x4) {
  // vceqzq sets a lane to all-ones when it equals 0; the horizontal min is
  // nonzero only if every lane passed the comparison.
  const uint32x4_t cmp_result = vceqzq_f32(v_f32x4);
  const uint32_t u32 = vminvq_u32(cmp_result);
  return u32;
}
#else
// ARMv7 fallback: no horizontal-reduction instructions, so fold the four u32
// lanes into a u64 and test that.
inline bool IsAllZero(const uint32x4_t u32x4) {
  const uint32x2_t u32x2 = vqadd_u32(vget_high_u32(u32x4), vget_low_u32(u32x4));
  const uint64x1_t u64 = vreinterpret_u64_u32(u32x2);
  return !vget_lane_u64(u64, 0);
}
#ifndef __SSE__
// With Intel NEON-2-SSE translator library, this is a redefinition..
inline bool IsAllZero(const int8x16_t v) {
  return IsAllZero(vreinterpretq_u32_s8(v));
}
#endif
inline bool IsAllZero(const float32x4_t v_f32x4) {
  const float32x4_t zero_f32x4 = vmovq_n_f32(0.0f);
  // Compare-absolute greater-than, |v| > |0|, equivalently v != 0
  const uint32x4_t cmp_result = vcagtq_f32(v_f32x4, zero_f32x4);
  return IsAllZero(cmp_result);
}
#endif
} // namespace
// Returns true iff every element of `vector` is zero.
bool NeonIsZeroVector(const float* vector, int v_size) {
  // Elements covered by full NEON vectors; the remainder is checked one at a
  // time below.
  const int vectorized_end =
      RoundDownVectors<kFloatValuesPerNeonVector>(v_size);
  int i = 0;
  for (; i < vectorized_end; i += kFloatValuesPerNeonVector) {
    if (!IsAllZero(vld1q_f32(vector + i))) return false;
  }
  // Scalar tail.
  for (; TFLITE_UNLIKELY(i < v_size); ++i) {
    if (vector[i] != 0.0) return false;
  }
  return true;
}
// Returns true iff every element of `vector` is zero.
bool NeonIsZeroVector(const int8_t* vector, int v_size) {
  // Elements covered by full NEON vectors; the remainder is checked one at a
  // time below.
  const int vectorized_end =
      RoundDownVectors<kInt8ValuesPerNeonVector>(v_size);
  int i = 0;
  for (; i < vectorized_end; i += kInt8ValuesPerNeonVector) {
    if (!IsAllZero(vld1q_s8(vector + i))) return false;
  }
  // Scalar tail.
  for (; TFLITE_UNLIKELY(i < v_size); ++i) {
    if (vector[i] != 0) return false;
  }
  return true;
}
// Converts an int8 vector to float with a uniform scale:
// result[i] = scale * vector[i]. `vector` must be aligned to
// kNeonVectorAlignment. Processes 16 elements per main-loop iteration, then
// an optional 8-wide step, then a scalar tail.
void NeonVectorScalarMultiply(const int8_t* vector, const int v_size,
                              const float scale, float* result) {
  // Here the assumption is that each buffer is 4-byte aligned.
  TFLITE_CHECK_EQ((intptr_t)(&vector[0]) & (kNeonVectorAlignment - 1), 0);
  // If v_size is not divisible by kInt8ValuesPerNeonVector, we cannot use the
  // main vectorized loop, and we need to process sequentially. postamble_start
  // shows the start index where this should happen.
  const int postamble_start =
      RoundDownVectors<kInt8ValuesPerNeonVector>(v_size);
  // Create a vector of 4 floats with the scale value.
  const float32x4_t scale_f32x4 = vdupq_n_f32(scale);
  int v = 0;
  for (; v < postamble_start; v += kInt8ValuesPerNeonVector) {
    // Load int8 values, sixteen at a time.
    const int8x16_t v_i8x16 = vld1q_s8(vector + v);
    // Split it into two components of size eight.
    const int8x8_t v0_i8x8 = vget_low_s8(v_i8x16);
    const int8x8_t v1_i8x8 = vget_high_s8(v_i8x16);
    // Convert both components to int16 first.
    const int16x8_t v0_i16x8 = vmovl_s8(v0_i8x8);
    const int16x8_t v1_i16x8 = vmovl_s8(v1_i8x8);
    // Split each of them into two components each.
    const int16x4_t v0_i16x4 = vget_low_s16(v0_i16x8);
    const int16x4_t v1_i16x4 = vget_high_s16(v0_i16x8);
    const int16x4_t v2_i16x4 = vget_low_s16(v1_i16x8);
    const int16x4_t v3_i16x4 = vget_high_s16(v1_i16x8);
    // Convert these to int32 and then to float.
    float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
    float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
    float32x4_t v2_f32x4 = vcvtq_f32_s32(vmovl_s16(v2_i16x4));
    float32x4_t v3_f32x4 = vcvtq_f32_s32(vmovl_s16(v3_i16x4));
    // Vector multiply four floats at a time.
    v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
    v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
    v2_f32x4 = vmulq_f32(v2_f32x4, scale_f32x4);
    v3_f32x4 = vmulq_f32(v3_f32x4, scale_f32x4);
    // Store the results.
    vst1q_f32(result + v, v0_f32x4);
    vst1q_f32(result + v + 4, v1_f32x4);
    vst1q_f32(result + v + 8, v2_f32x4);
    vst1q_f32(result + v + 12, v3_f32x4);
  }
  // Half-width step: handle one group of 8 if at least 8 elements remain.
  if (TFLITE_UNLIKELY(v_size - postamble_start >=
                      (kInt8ValuesPerNeonVector >> 1))) {
    // Load eight int8 values, if there is at least eight remaining.
    const int8x8_t v_i8x8 = vld1_s8(vector + v);
    // Convert them to int16 first.
    const int16x8_t v_i16x8 = vmovl_s8(v_i8x8);
    // Split it into two components.
    const int16x4_t v0_i16x4 = vget_low_s16(v_i16x8);
    const int16x4_t v1_i16x4 = vget_high_s16(v_i16x8);
    // Convert the components two floats.
    float32x4_t v0_f32x4 = vcvtq_f32_s32(vmovl_s16(v0_i16x4));
    float32x4_t v1_f32x4 = vcvtq_f32_s32(vmovl_s16(v1_i16x4));
    // Vector multiply four floats at a time.
    v0_f32x4 = vmulq_f32(v0_f32x4, scale_f32x4);
    v1_f32x4 = vmulq_f32(v1_f32x4, scale_f32x4);
    // Store the results.
    vst1q_f32(result + v, v0_f32x4);
    vst1q_f32(result + v + 4, v1_f32x4);
    v += (kInt8ValuesPerNeonVector >> 1);
  }
  // Postamble loop.
  for (; TFLITE_UNLIKELY(v < v_size); v++) {
    result[v] = scale * vector[v];
  }
}
// Rounds each float lane to the nearest integer, with ties rounded away from
// zero.
// TODO(b/185850916): Consider changing the rounding strategy from "ties to
// away" to "ties to even" since vcvtnq_s32_f32 is generally more available.
inline int32x4_t RoundToNearest(const float32x4_t input) {
#if __ARM_ARCH >= 8
  // ARMv8 has a direct convert-with-round-to-nearest-ties-away instruction.
  return vcvtaq_s32_f32(input);
#else
  static const float32x4_t zero_val_dup = vdupq_n_f32(0.0f);
  static const float32x4_t point5_val_dup = vdupq_n_f32(0.5f);
  // `mask` is -1 (all ones) for negative lanes and 0 otherwise, so `round`
  // becomes -0.5 for negative lanes and +0.5 for the rest; adding it and then
  // truncating (vcvtq rounds toward zero) yields round-half-away-from-zero.
  const int32x4_t mask = vreinterpretq_s32_u32(vcltq_f32(input, zero_val_dup));
  const float32x4_t casted_mask = vcvtq_f32_s32(mask);
  const float32x4_t round = vaddq_f32(casted_mask, point5_val_dup);
  return vcvtq_s32_f32(vaddq_f32(input, round));
#endif
}
// Note: this function caps minimum and maximum at zero, unlike the true
// minmax_element. This is intentional: the running min/max are seeded with
// 0.0f, so the reported range always contains zero.
inline void NeonMinMax(const float* values, const int size, float* min,
                       float* max) {
  const int postamble_start = RoundDownVectors<kFloatValuesPerNeonVector>(size);
  float rmin = 0.0f, rmax = 0.0f;
  int i = 0;
  if (postamble_start) {
    // Seed the SIMD min/max with the first 4 values, then fold in the rest
    // of the vectorizable region.
    float32x4_t min_f32x4 = vld1q_f32(values);
    float32x4_t max_f32x4 = min_f32x4;
    for (i = kFloatValuesPerNeonVector; i < postamble_start;
         i += kFloatValuesPerNeonVector) {
      const float32x4_t value0_f32x4 = vld1q_f32(&values[i]);
      min_f32x4 = vminq_f32(min_f32x4, value0_f32x4);
      max_f32x4 = vmaxq_f32(max_f32x4, value0_f32x4);
    }
#ifdef __aarch64__
    // AArch64 has direct horizontal min/max reductions.
    rmin = std::min(rmin, vminvq_f32(min_f32x4));
    rmax = std::max(rmax, vmaxvq_f32(max_f32x4));
#else
    // ARMv7: reduce 4 lanes -> 2 -> 1 with pairwise min/max.
    float32x2_t min_f32x2 =
        vmin_f32(vget_low_f32(min_f32x4), vget_high_f32(min_f32x4));
    float32x2_t max_f32x2 =
        vmax_f32(vget_low_f32(max_f32x4), vget_high_f32(max_f32x4));
    min_f32x2 = vpmin_f32(min_f32x2, min_f32x2);
    max_f32x2 = vpmax_f32(max_f32x2, max_f32x2);
    rmin = std::min(rmin, vget_lane_f32(min_f32x2, 0));
    rmax = std::max(rmax, vget_lane_f32(max_f32x2, 0));
#endif  // __aarch64__
  }
  // Scalar tail via std::minmax_element.
  if (TFLITE_UNLIKELY(i < size)) {
    const auto minmax =
        std::minmax_element(values + postamble_start, values + size);
    rmin = std::min(rmin, *minmax.first);
    rmax = std::max(rmax, *minmax.second);
  }
  *min = rmin;
  *max = rmax;
}
// Symmetrically quantizes `values`, also reporting the observed [min, max]
// range. Computes the range scalar-wise, then delegates to the overload that
// takes an explicit range.
void NeonSymmetricQuantizeFloats(const float* values, const int size,
                                 int8_t* quantized_values, float* min,
                                 float* max, float* scaling_factor) {
  // TODO(raziel): vectorize min/max calculation.
  const auto minmax = std::minmax_element(values, values + size);
  *min = *minmax.first;
  *max = *minmax.second;
  NeonSymmetricQuantizeFloats(values, size, quantized_values, *min, *max,
                              scaling_factor);
}
// Symmetric per-tensor quantization of `values` to int8 in [-127, 127] given
// a precomputed [min, max] range. scaling_factor is set so that
// real_value ~= quantized_value * scaling_factor. A zero range produces
// all-zero output with scaling_factor = 1. Processes two NEON vectors
// (8 floats) per iteration, with a scalar tail.
void NeonSymmetricQuantizeFloats(const float* values, const int size,
                                 int8_t* quantized_values, float min, float max,
                                 float* scaling_factor) {
  constexpr int kScale = 127;
  const float range = std::max(std::abs(min), std::abs(max));
  if (range == 0) {
    // Degenerate input: everything quantizes to 0.
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    return;
  }
  *scaling_factor = range / kScale;
  const float scaling_factor_inv = kScale / range;
  const int postamble_start =
      RoundDownVectors<(2 * kFloatValuesPerNeonVector)>(size);
  // Vectorized constants.
  const float32x4_t q_factor_f32x4 = vmovq_n_f32(scaling_factor_inv);
  const int32x4_t scale_i32x4 = vmovq_n_s32(kScale);
  const int32x4_t neg_scale_i32x4 = vmovq_n_s32(-kScale);
  int i = 0;
  for (; i < postamble_start; i += 2 * kFloatValuesPerNeonVector) {
    // Implements the vectorized version of the following:
    // const int32 quantized_value = static_cast<int32>(
    //    std::round(*scaling_factor * values[i]));
    float32x4_t value0_f32x4 = vld1q_f32(&values[i]);
    float32x4_t value1_f32x4 =
        vld1q_f32(&values[i + kFloatValuesPerNeonVector]);
    float32x4_t mul0_f32x4 = vmulq_f32(value0_f32x4, q_factor_f32x4);
    float32x4_t mul1_f32x4 = vmulq_f32(value1_f32x4, q_factor_f32x4);
    const int32x4_t f2i0_i32x4 = RoundToNearest(mul0_f32x4);
    const int32x4_t f2i1_i32x4 = RoundToNearest(mul1_f32x4);
    // Implements the vectorized version of the following block:
    //  quantized_values[i] = std::min(kScale, std::max(-kScale,
    //  quantized_value));
    int32x4_t max0_i32x4 = vmaxq_s32(f2i0_i32x4, neg_scale_i32x4);
    int32x4_t max1_i32x4 = vmaxq_s32(f2i1_i32x4, neg_scale_i32x4);
    int32x4_t min0_i32x4 = vminq_s32(max0_i32x4, scale_i32x4);
    int32x4_t min1_i32x4 = vminq_s32(max1_i32x4, scale_i32x4);
    // Narrow 32 -> 16 -> 8 bits and store 8 quantized values.
    int16x4_t min0_16x4 = vmovn_s32(min0_i32x4);
    int16x4_t min1_16x4 = vmovn_s32(min1_i32x4);
    int16x8_t min_16x8 = vcombine_s16(min0_16x4, min1_16x4);
    int8x8_t min_s8x8 = vqmovn_s16(min_16x8);
    vst1_s8(&quantized_values[i], min_s8x8);
  }
  // Scalar tail, mirroring the vector path.
  for (; TFLITE_UNLIKELY(i < size); ++i) {
    const int32 quantized_value =
        static_cast<int32>(TfLiteRound(scaling_factor_inv * values[i]));
    quantized_values[i] = std::min(kScale, std::max(-kScale, quantized_value));
  }
}
// Asymmetric per-tensor quantization of `values` to int8 in [-128, 127].
// The observed [rmin, rmax] range (which always contains zero, per
// NeonMinMax) determines the scale, and a "nudged" zero point is chosen to
// minimize the zero-point rounding error; `offset` receives the quantized
// representation of real 0. An empty range (rmin == rmax) produces all-zero
// output with scaling_factor = 1 and offset = 0.
void NeonAsymmetricQuantizeFloats(const float* values, const int size,
                                  int8_t* quantized_values,
                                  float* scaling_factor, int32_t* offset) {
  float rmin, rmax;
  NeonMinMax(values, size, &rmin, &rmax);
  const int32_t kMinScale = -128;
  const int32_t kMaxScale = 127;
  const double qmin_double = kMinScale;
  const double qmax_double = kMaxScale;
  if (rmin == rmax) {
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    *offset = 0;
    return;
  } else {
    const double scale = (rmax - rmin) / (qmax_double - qmin_double);
    // Candidate zero points derived from either end of the range; pick the
    // one with the smaller rounding error, then clamp ("nudge") it into the
    // representable int8 range.
    const double zero_point_from_min = qmin_double - rmin / scale;
    const double zero_point_from_max = qmax_double - rmax / scale;
    const double zero_point_from_min_error =
        std::abs(qmin_double) + std::abs(rmin / scale);
    const double zero_point_from_max_error =
        std::abs(qmax_double) + std::abs(rmax / scale);
    const double zero_point_double =
        zero_point_from_min_error < zero_point_from_max_error
            ? zero_point_from_min
            : zero_point_from_max;
    int8 nudged_zero_point = 0;
    if (zero_point_double <= qmin_double) {
      nudged_zero_point = kMinScale;
    } else if (zero_point_double >= qmax_double) {
      nudged_zero_point = kMaxScale;
    } else {
      nudged_zero_point = static_cast<int8>(round(zero_point_double));
    }
    *scaling_factor = scale;
    *offset = nudged_zero_point;
  }
  const int postamble_start =
      RoundDownVectors<(2 * kFloatValuesPerNeonVector)>(size);
  const float scaling_factor_inv =
      *scaling_factor == 0 ? 0 : 1.0 / *scaling_factor;
  // Vectorized constants: inverse scale, clamp bounds, and zero-point offset.
  const float32x4_t q_factor_f32x4 = vmovq_n_f32(scaling_factor_inv);
  const int32x4_t scale_i32x4 = vmovq_n_s32(kMaxScale);
  const int32x4_t neg_scale_i32x4 = vmovq_n_s32(kMinScale);
  const int32x4_t offset_i32x4 = vmovq_n_s32(*offset);
  int i = 0;
  for (; i < postamble_start; i += 2 * kFloatValuesPerNeonVector) {
    // quantized = round(value / scale) + offset, clamped to [-128, 127].
    float32x4_t value0_f32x4 = vld1q_f32(&values[i]);
    float32x4_t value1_f32x4 =
        vld1q_f32(&values[i + kFloatValuesPerNeonVector]);
    float32x4_t mul0_f32x4 = vmulq_f32(value0_f32x4, q_factor_f32x4);
    float32x4_t mul1_f32x4 = vmulq_f32(value1_f32x4, q_factor_f32x4);
    const int32x4_t f2i0_i32x4 = RoundToNearest(mul0_f32x4);
    const int32x4_t f2i1_i32x4 = RoundToNearest(mul1_f32x4);
    // Add offset
    int32x4_t q0_i32x4 = vaddq_s32(f2i0_i32x4, offset_i32x4);
    int32x4_t q1_i32x4 = vaddq_s32(f2i1_i32x4, offset_i32x4);
    int32x4_t max0_i32x4 = vmaxq_s32(q0_i32x4, neg_scale_i32x4);
    int32x4_t max1_i32x4 = vmaxq_s32(q1_i32x4, neg_scale_i32x4);
    int32x4_t min0_i32x4 = vminq_s32(max0_i32x4, scale_i32x4);
    int32x4_t min1_i32x4 = vminq_s32(max1_i32x4, scale_i32x4);
    // Narrow 32 -> 16 -> 8 bits and store 8 quantized values.
    int16x4_t min0_16x4 = vmovn_s32(min0_i32x4);
    int16x4_t min1_16x4 = vmovn_s32(min1_i32x4);
    int16x8_t min_16x8 = vcombine_s16(min0_16x4, min1_16x4);
    int8x8_t min_s8x8 = vqmovn_s16(min_16x8);
    vst1_s8(&quantized_values[i], min_s8x8);
  }
  // Scalar tail, mirroring the vector path.
  for (; TFLITE_UNLIKELY(i < size); ++i) {
    const int32 quantized_value = static_cast<int32>(
        *offset + TfLiteRound(scaling_factor_inv * values[i]));
    quantized_values[i] =
        std::min(kMaxScale, std::max(kMinScale, quantized_value));
  }
}
float NeonVectorVectorDotProduct(const float* vector1, const float* vector2,
int v_size) {
// If v_size is not divisible by the vector size, then we need to process the
// final few elements sequentially. postamble_start shows the start index
// where this should happen.
const int postamble_start =
RoundDownVectors<kFloatValuesPerNeonVector>(v_size);
float32x4_t acc_32x4 = vmovq_n_f32(0.0);
int v = 0;
for (; v < postamble_start; v += kFloatValuesPerNeonVector) {
// Load 4 float values from vector1 and vector2 and accumulator.
float32x4_t v1_f32x4 = vld1q_f32(vector1 + v);
float32x4_t v2_f32x4 = vld1q_f32(vector2 + v);
// Vector multiply-accumulate 4 float
acc_32x4 = vmlaq_f32(acc_32x4, v1_f32x4, v2_f32x4);
}
float result = AccumulateNeonLane(acc_32x4);
// Postamble loop.
for (; TFLITE_UNLIKELY(v < v_size); v++) {
result += vector1[v] * vector2[v];
}
return result;
}
void NeonReductionSumVector(const float* input_vector, float* output_vector,
int output_size, int reduction_size) {
for (int o = 0; o < output_size; o++) {
// If v_size is not divisible by the vector size, then we need to process
// the final few elements sequentially. postamble_start shows the start
// index where this should happen.
const int postamble_start =
RoundDownVectors<kFloatValuesPerNeonVector>(reduction_size);
float32x4_t sum_f32x4 = vmovq_n_f32(0.0);
int r = 0;
for (; r < postamble_start; r += kFloatValuesPerNeonVector) {
float32x4_t v1_f32x4 = vld1q_f32(input_vector + r);
sum_f32x4 = vaddq_f32(sum_f32x4, v1_f32x4);
}
float sum = AccumulateNeonLane(sum_f32x4);
// Postamble loop.
for (; TFLITE_UNLIKELY(r < reduction_size); r++) {
sum += input_vector[r];
}
output_vector[o] = sum;
input_vector += reduction_size;
}
}
// Integer variant of the reduction sum: for each output element, sums
// `reduction_size` consecutive int8 inputs into an int32. Sixteen bytes are
// folded per iteration using pairwise widening adds (s8 -> s16 -> s32), so
// the accumulation cannot overflow before reaching 32 bits.
void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                            const int output_size, const int reduction_size) {
  // End of the full 16-element vector region of each row...
  const int postamble_half_start =
      RoundDownVectors<kInt8ValuesPerNeonVector>(reduction_size);
  // ...and of the optional 8-element half-vector that may follow it.
  const int postamble_start =
      RoundDownVectors<(kInt8ValuesPerNeonVector / 2)>(reduction_size);
  for (int o = 0; o < output_size; ++o) {
    int32x4_t sum_32x4 = vmovq_n_s32(0);
    int r = 0;
    for (; r < postamble_half_start; r += kInt8ValuesPerNeonVector) {
      // vpaddlq_s8 pairwise-adds 16 s8 lanes into 8 s16 lanes; vpadalq_s16
      // pairwise-adds those into the 4 s32 accumulator lanes.
      const int8x16_t s2_8x16 = vld1q_s8(input_vector + r);
      sum_32x4 = vpadalq_s16(sum_32x4, vpaddlq_s8(s2_8x16));
    }
    if (TFLITE_UNLIKELY(r < postamble_start)) {
      // One remaining half vector: widen 8 s8 lanes to s16, then accumulate.
      const int8x8_t s2_8x8 = vld1_s8(input_vector + r);
      sum_32x4 = vpadalq_s16(sum_32x4, vmovl_s8(s2_8x8));
      r += (kInt8ValuesPerNeonVector >> 1);
    }
    int32_t sum = AccumulateNeonLane(sum_32x4);
    // Scalar postamble for the last (reduction_size % 8) elements.
    for (; TFLITE_UNLIKELY(r < reduction_size); ++r) {
      sum += input_vector[r];
    }
    output_vector[o] = sum;
    // Advance to the next row of the flattened input.
    input_vector += reduction_size;
  }
}
// Element-wise multiplies `vector` with each of the `n_batch` batch vectors,
// rescales every 32-bit product by the quantized (multiplier, shift) pair,
// adds the rescaled value into `result`, and saturates to the int16 range.
// `batch_vector` and `result` advance through the batches as the loops run,
// while `vector` is re-read from the start for every batch. The main loop
// handles 16 int16 lanes (four NEON vectors) per iteration.
void NeonVectorBatchVectorCwiseProductAccumulate(
    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
    int32_t multiplier, int shift, int16_t* result) {
  // int16 saturation bounds, kept in s32 registers so clamping happens
  // before narrowing back to 16 bits.
  int32x4_t min_value_vector = vdupq_n_s32(-32768);
  int32x4_t max_value_vector = vdupq_n_s32(32767);
  for (int b = 0; b < n_batch; b++) {
    int v = 0;
    for (; v <= v_size - 16; v += 16) {
      // Widening 16x16 -> 32-bit multiplies for 16 lanes.
      int32x4x4_t prod;
      prod.val[0] = vmull_s16(vld1_s16(vector + v), vld1_s16(batch_vector));
      prod.val[1] =
          vmull_s16(vld1_s16(vector + v + 4), vld1_s16(batch_vector + 4));
      prod.val[2] =
          vmull_s16(vld1_s16(vector + v + 8), vld1_s16(batch_vector + 8));
      prod.val[3] =
          vmull_s16(vld1_s16(vector + v + 12), vld1_s16(batch_vector + 12));
      batch_vector += 16;
      // Rescale the products back to the output's quantized domain.
      prod = MultiplyByQuantizedMultiplier4Rows(prod, multiplier, shift);
      // Accumulate the existing output values (widened to 32 bits).
      int16x4x4_t results;
      results.val[0] = vld1_s16(result);
      results.val[1] = vld1_s16(result + 4);
      results.val[2] = vld1_s16(result + 8);
      results.val[3] = vld1_s16(result + 12);
      prod.val[0] = vaddq_s32(prod.val[0], vmovl_s16(results.val[0]));
      prod.val[1] = vaddq_s32(prod.val[1], vmovl_s16(results.val[1]));
      prod.val[2] = vaddq_s32(prod.val[2], vmovl_s16(results.val[2]));
      prod.val[3] = vaddq_s32(prod.val[3], vmovl_s16(results.val[3]));
      // Clamp to [-32768, 32767] before narrowing and storing.
      prod.val[0] = vmaxq_s32(prod.val[0], min_value_vector);
      prod.val[1] = vmaxq_s32(prod.val[1], min_value_vector);
      prod.val[2] = vmaxq_s32(prod.val[2], min_value_vector);
      prod.val[3] = vmaxq_s32(prod.val[3], min_value_vector);
      prod.val[0] = vminq_s32(prod.val[0], max_value_vector);
      prod.val[1] = vminq_s32(prod.val[1], max_value_vector);
      prod.val[2] = vminq_s32(prod.val[2], max_value_vector);
      prod.val[3] = vminq_s32(prod.val[3], max_value_vector);
      vst1_s16(result, vmovn_s32(prod.val[0]));
      vst1_s16(result + 4, vmovn_s32(prod.val[1]));
      vst1_s16(result + 8, vmovn_s32(prod.val[2]));
      vst1_s16(result + 12, vmovn_s32(prod.val[3]));
      result += 16;
    }
    // Scalar postamble for the last (v_size % 16) elements of the batch.
    for (; TFLITE_UNLIKELY(v < v_size); v++) {
      int32_t prod = vector[v] * *batch_vector++;
      prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
      int32_t output = prod + *result;
      output = std::max(std::min(32767, output), -32768);
      *result++ = output;
    }
  }
}
// Normalizes each batch of `v_size` floats to zero mean and unit variance:
//   output[i] = (input[i] - mean) / sqrt(variance + 1e-8).
// Three passes per batch (sum -> squared differences -> normalize), each
// unrolled 4x over NEON vectors (16 floats per iteration) with a scalar
// postamble for the remainder.
void NeonMeanStddevNormalization(const float* __restrict__ input_vector,
                                 float* __restrict__ output_vector, int v_size,
                                 int n_batch) {
  constexpr int kBlockSize = kFloatValuesPerNeonVector * 4;
  for (int batch = 0; batch < n_batch; ++batch) {
    // Pass 1: sum of the batch, using four independent accumulators to
    // expose instruction-level parallelism.
    float32x4_t sum_f32x4_0 = vdupq_n_f32(0.0f);
    float32x4_t sum_f32x4_1 = vdupq_n_f32(0.0f);
    float32x4_t sum_f32x4_2 = vdupq_n_f32(0.0f);
    float32x4_t sum_f32x4_3 = vdupq_n_f32(0.0f);
    int i = 0;
    for (; i <= v_size - kBlockSize; i += kBlockSize) {
      const float32x4_t input_f32x4_0 =
          vld1q_f32(input_vector + i + 0 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_1 =
          vld1q_f32(input_vector + i + 1 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_2 =
          vld1q_f32(input_vector + i + 2 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_3 =
          vld1q_f32(input_vector + i + 3 * kFloatValuesPerNeonVector);
      sum_f32x4_0 = vaddq_f32(sum_f32x4_0, input_f32x4_0);
      sum_f32x4_1 = vaddq_f32(sum_f32x4_1, input_f32x4_1);
      sum_f32x4_2 = vaddq_f32(sum_f32x4_2, input_f32x4_2);
      sum_f32x4_3 = vaddq_f32(sum_f32x4_3, input_f32x4_3);
    }
    // Tree-reduce the four accumulators, then the four lanes.
    sum_f32x4_0 = vaddq_f32(sum_f32x4_0, sum_f32x4_2);
    sum_f32x4_1 = vaddq_f32(sum_f32x4_1, sum_f32x4_3);
    sum_f32x4_0 = vaddq_f32(sum_f32x4_0, sum_f32x4_1);
    float sum = AccumulateNeonLane(sum_f32x4_0);
    // Scalar postamble for the trailing (v_size % kBlockSize) elements.
    for (; TFLITE_UNLIKELY(i < v_size); ++i) {
      sum += input_vector[i];
    }
    // Calculate mean
    const float mean = sum / v_size;
    const float32x4_t mean_f32x4 = vdupq_n_f32(mean);
    // Pass 2: sum of squared differences from the mean, again with four
    // independent accumulators.
    float32x4_t sum_diff_sq_f32x4_0 = vdupq_n_f32(0.0f);
    float32x4_t sum_diff_sq_f32x4_1 = vdupq_n_f32(0.0f);
    float32x4_t sum_diff_sq_f32x4_2 = vdupq_n_f32(0.0f);
    float32x4_t sum_diff_sq_f32x4_3 = vdupq_n_f32(0.0f);
    i = 0;
    for (; i <= v_size - kBlockSize; i += kBlockSize) {
      const float32x4_t input_f32x4_0 =
          vld1q_f32(input_vector + i + 0 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_1 =
          vld1q_f32(input_vector + i + 1 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_2 =
          vld1q_f32(input_vector + i + 2 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_3 =
          vld1q_f32(input_vector + i + 3 * kFloatValuesPerNeonVector);
      const float32x4_t diff_f32x4_0 = vsubq_f32(input_f32x4_0, mean_f32x4);
      const float32x4_t diff_f32x4_1 = vsubq_f32(input_f32x4_1, mean_f32x4);
      const float32x4_t diff_f32x4_2 = vsubq_f32(input_f32x4_2, mean_f32x4);
      const float32x4_t diff_f32x4_3 = vsubq_f32(input_f32x4_3, mean_f32x4);
      // Fused multiply-accumulate of diff * diff.
      sum_diff_sq_f32x4_0 =
          vmlaq_f32(sum_diff_sq_f32x4_0, diff_f32x4_0, diff_f32x4_0);
      sum_diff_sq_f32x4_1 =
          vmlaq_f32(sum_diff_sq_f32x4_1, diff_f32x4_1, diff_f32x4_1);
      sum_diff_sq_f32x4_2 =
          vmlaq_f32(sum_diff_sq_f32x4_2, diff_f32x4_2, diff_f32x4_2);
      sum_diff_sq_f32x4_3 =
          vmlaq_f32(sum_diff_sq_f32x4_3, diff_f32x4_3, diff_f32x4_3);
    }
    sum_diff_sq_f32x4_0 = vaddq_f32(sum_diff_sq_f32x4_0, sum_diff_sq_f32x4_2);
    sum_diff_sq_f32x4_1 = vaddq_f32(sum_diff_sq_f32x4_1, sum_diff_sq_f32x4_3);
    sum_diff_sq_f32x4_0 = vaddq_f32(sum_diff_sq_f32x4_0, sum_diff_sq_f32x4_1);
    float sum_diff_sq = AccumulateNeonLane(sum_diff_sq_f32x4_0);
    for (; TFLITE_UNLIKELY(i < v_size); ++i) {
      const float diff = input_vector[i] - mean;
      sum_diff_sq += diff * diff;
    }
    // Calculate 1/stddev; the constant guards against division by zero for
    // an all-constant batch (variance == 0).
    const float variance = sum_diff_sq / v_size;
    constexpr float kNormalizationConstant = 1e-8f;
    const float stddev_inv =
        1.0f / std::sqrt(variance + kNormalizationConstant);
    // Pass 3: write the normalized output.
    i = 0;
    for (; i <= v_size - kBlockSize; i += kBlockSize) {
      const float32x4_t input_f32x4_0 =
          vld1q_f32(input_vector + i + 0 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_1 =
          vld1q_f32(input_vector + i + 1 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_2 =
          vld1q_f32(input_vector + i + 2 * kFloatValuesPerNeonVector);
      const float32x4_t input_f32x4_3 =
          vld1q_f32(input_vector + i + 3 * kFloatValuesPerNeonVector);
      const float32x4_t tmp_0 = vsubq_f32(input_f32x4_0, mean_f32x4);
      const float32x4_t tmp_1 = vsubq_f32(input_f32x4_1, mean_f32x4);
      const float32x4_t tmp_2 = vsubq_f32(input_f32x4_2, mean_f32x4);
      const float32x4_t tmp_3 = vsubq_f32(input_f32x4_3, mean_f32x4);
      const float32x4_t output_f32x4_0 = vmulq_n_f32(tmp_0, stddev_inv);
      const float32x4_t output_f32x4_1 = vmulq_n_f32(tmp_1, stddev_inv);
      const float32x4_t output_f32x4_2 = vmulq_n_f32(tmp_2, stddev_inv);
      const float32x4_t output_f32x4_3 = vmulq_n_f32(tmp_3, stddev_inv);
      vst1q_f32(output_vector + i + 0 * kFloatValuesPerNeonVector,
                output_f32x4_0);
      vst1q_f32(output_vector + i + 1 * kFloatValuesPerNeonVector,
                output_f32x4_1);
      vst1q_f32(output_vector + i + 2 * kFloatValuesPerNeonVector,
                output_f32x4_2);
      vst1q_f32(output_vector + i + 3 * kFloatValuesPerNeonVector,
                output_f32x4_3);
    }
    for (; TFLITE_UNLIKELY(i < v_size); ++i) {
      output_vector[i] = (input_vector[i] - mean) * stddev_inv;
    }
    // Advance to next batch
    input_vector += v_size;
    output_vector += v_size;
  }
}
} // namespace tensor_utils
} // namespace tflite
#endif // USE_NEON
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.cc | C++ | apache-2.0 | 113,182 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_tensor_utils_impl.h"
#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
namespace tflite {
namespace tensor_utils {

// This header defines the tensor_utils entry points for NEON-capable builds.
// Most functions dispatch through the NEON_OR_PORTABLE macro (see
// cpu_check.h), which selects the Neon*-prefixed implementation or the
// Portable* reference implementation; functions that call a Portable*
// function directly have no NEON specialization.

// Float matrix times a batch of vectors, accumulating into `result`.
void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                         int m_cols, const float* vector,
                                         int n_batch, float* result) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vector, n_batch, result);
}

// Hybrid (int8 weights, float result) variant with per-batch scaling factors.
void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                         const int m_rows, const int m_cols,
                                         const int8_t* __restrict__ vectors,
                                         const float* scaling_factors,
                                         int n_batch,
                                         float* __restrict__ result) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vectors, scaling_factors, n_batch, result);
}

// As above, with an int32 scratch buffer and CpuBackendContext for the
// int8 x int8 -> int32 accumulation.
void MatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                         const int m_rows, const int m_cols,
                                         const int8_t* __restrict__ vectors,
                                         const float* scaling_factors,
                                         int n_batch, int32_t* scratch,
                                         float* __restrict__ result,
                                         CpuBackendContext* context) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vectors, scaling_factors, n_batch, scratch, result, context);
}

// Asymmetric-quantization variant: adds per-channel scales, input offsets
// and (optionally cached) row sums.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vectors, scaling_factors, n_batch, result, per_channel_scale,
                   input_offset, scratch, row_sums, compute_row_sums, context);
}

// Sparse float matrix (1x4 block format, CSR-style segments/indices) times a
// batch of vectors.
void SparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
  NEON_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate1x4, matrix,
                   segments, indices, m_rows, m_cols, vector, n_batch, result);
}

// Sparse float matrix (ledger format) times a batch of vectors.
void SparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result) {
  NEON_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate, matrix, ledger,
                   m_rows, m_cols, vector, n_batch, result);
}

// Sparse hybrid (int8 weights) matrix times a batch of vectors.
void SparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result) {
  NEON_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate, matrix, ledger,
                   m_rows, m_cols, vectors, scaling_factors, n_batch, result);
}

// Fully-quantized variants (int8 input, int16 or int8 output) used by the
// integer LSTM kernels.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, input, bias,
                   input_to_gate_weights, multiplier, shift, n_batch, n_input,
                   n_output, output_zp, scratch, output, context);
}

void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, input, bias,
                   input_to_gate_weights, multiplier, shift, n_batch, n_input,
                   n_output, output_zp, scratch, output, context);
}

// No NEON implementation: always uses the portable reference.
void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint,
                               const int8_t* input_to_gate_weights,
                               int32_t input_to_gate_effective_scale_a,
                               int32_t input_to_gate_effective_scale_b,
                               int32_t n_batch, int32_t n_input, int32_t n_cell,
                               int8_t* gate_output, int8_t gate_output_zp) {
  PortableMatrixBatchVectorMultiply(
      input, input_zeropoint, input_to_gate_weights,
      input_to_gate_effective_scale_a, input_to_gate_effective_scale_b, n_batch,
      n_input, n_cell, gate_output, gate_output_zp);
}

// No NEON implementation: always uses the portable reference.
void MatrixBatchVectorMultiply(const int16_t* hidden,
                               const int8_t* hidden_to_output_weights,
                               int32_t proj_effective_scale_a,
                               int32_t proj_effective_scale_b,
                               const int32_t* gate_bias, int32_t n_batch,
                               int32_t n_hidden, int32_t n_output,
                               int32_t output_zp, int8_t* proj_output) {
  PortableMatrixBatchVectorMultiply(hidden, hidden_to_output_weights,
                                    proj_effective_scale_a,
                                    proj_effective_scale_b, gate_bias, n_batch,
                                    n_hidden, n_output, output_zp, proj_output);
}

void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
                                    int32_t n_row, int32_t n_col,
                                    int32_t* output) {
  NEON_OR_PORTABLE(MatrixScalarMultiplyAccumulate, matrix, scalar, n_row, n_col,
                   output);
}

// Quantized activation / normalization ops.
void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights,
                    const int32_t* bias, int32_t layer_norm_scale_a,
                    int32_t layer_norm_scale_b, int32_t variance_limit,
                    int n_batch, int n_input, int16_t* output) {
  NEON_OR_PORTABLE(ApplyLayerNorm, input, layer_norm_weights, bias,
                   layer_norm_scale_a, layer_norm_scale_b, variance_limit,
                   n_batch, n_input, output);
}

// Float-path variant; no NEON implementation.
void ApplyLayerNormFloat(const int16_t* input,
                         const int16_t* layer_norm_weights,
                         int32_t layer_norm_scale_a, int32_t layer_norm_scale_b,
                         const int32_t* bias, int n_batch, int n_input,
                         int16_t* output) {
  PortableApplyLayerNormFloat(input, layer_norm_weights, layer_norm_scale_a,
                              layer_norm_scale_b, bias, n_batch, n_input,
                              output);
}

void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
                  int16_t* output) {
  NEON_OR_PORTABLE(ApplySigmoid, input, n_batch, n_input, output);
}

// Float-path variant; no NEON implementation.
void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
                       int16_t* output) {
  PortableApplySigmoidFloat(input, n_batch, n_input, output);
}

void ApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch,
               int32_t n_input, int16_t* output) {
  NEON_OR_PORTABLE(ApplyTanh, integer_bits, input, n_batch, n_input, output);
}

// Float-path variant; no NEON implementation.
void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
                    int32_t integer_bits, int16_t* output) {
  PortableApplyTanhFloat(input, n_batch, n_input, integer_bits, output);
}

// Element-wise quantized ops.
void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch,
              int n_input, int shift, int16_t* output) {
  NEON_OR_PORTABLE(CwiseMul, input_1, input_2, n_batch, n_input, shift, output);
}

void CwiseMul(const int16_t* input_1, const int16_t* input_2,
              int32_t multiplier, int shift, int n_batch, int n_input,
              int32_t output_zp, int8_t* output) {
  NEON_OR_PORTABLE(CwiseMul, input_1, input_2, multiplier, shift, n_batch,
                   n_input, output_zp, output);
}

void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch,
              int n_input, int16_t* output) {
  NEON_OR_PORTABLE(CwiseAdd, input_1, input_2, n_batch, n_input, output);
}

// In-place clipping overloads for float/int16/int8 vectors.
void CwiseClipping(float* vector, const int v_size,
                   const float clipping_value) {
  NEON_OR_PORTABLE(CwiseClipping, vector, v_size, clipping_value);
}

void CwiseClipping(int16_t* vector, const int v_size,
                   const int16_t clipping_value) {
  NEON_OR_PORTABLE(CwiseClipping, vector, v_size, clipping_value);
}

void CwiseClipping(int8_t* vector, const int v_size,
                   const int8_t clipping_value) {
  NEON_OR_PORTABLE(CwiseClipping, vector, v_size, clipping_value);
}

// No NEON implementation: always uses the portable reference.
void BatchVectorBatchVectorDotProduct(const int16_t* vector1,
                                      const int16_t* vector2, int v_size,
                                      int n_batch, int32_t* result) {
  PortableBatchVectorBatchVectorDotProduct(vector1, vector2, v_size, n_batch,
                                           result);
}

void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size,
                                             const int16_t* batch_vector,
                                             int n_batch, int32_t multiplier,
                                             int shift, int16_t* result) {
  NEON_OR_PORTABLE(VectorBatchVectorCwiseProductAccumulate, vector, v_size,
                   batch_vector, n_batch, multiplier, shift, result);
}

float VectorVectorDotProduct(const float* vector1, const float* vector2,
                             int v_size) {
  return NEON_OR_PORTABLE(VectorVectorDotProduct, vector1, vector2, v_size);
}

// result[i] = 1 - vector[i] (used in CIFG LSTM gates).
void Sub1Vector(const float* vector, int v_size, float* result) {
  NEON_OR_PORTABLE(Sub1Vector, vector, v_size, result);
}

void Sub1Vector(const int16_t* vector, int v_size, int16_t* result) {
  NEON_OR_PORTABLE(Sub1Vector, vector, v_size, result);
}

// Check if all entries of a vector are zero for float.
bool IsZeroVector(const float* vector, int v_size) {
  return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
}

// Check if all entries of a vector are zero for int8.
bool IsZeroVector(const int8_t* vector, int v_size) {
  return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
}

void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
                          float* result) {
  NEON_OR_PORTABLE(VectorScalarMultiply, vector, v_size, scale, result);
}

// Symmetric quantization; the first overload also reports the observed
// min/max through the out-pointers, the second receives them by value.
void SymmetricQuantizeFloats(const float* values, const int size,
                             int8_t* quantized_values, float* min_value,
                             float* max_value, float* scaling_factor) {
  NEON_OR_PORTABLE(SymmetricQuantizeFloats, values, size, quantized_values,
                   min_value, max_value, scaling_factor);
}

void SymmetricQuantizeFloats(const float* values, const int size,
                             int8_t* quantized_values, float min_value,
                             float max_value, float* scaling_factor) {
  NEON_OR_PORTABLE(SymmetricQuantizeFloats, values, size, quantized_values,
                   min_value, max_value, scaling_factor);
}

void AsymmetricQuantizeFloats(const float* values, const int size,
                              int8_t* quantized_values, float* scaling_factor,
                              int32_t* offset) {
  NEON_OR_PORTABLE(AsymmetricQuantizeFloats, values, size, quantized_values,
                   scaling_factor, offset);
}

// Row-wise reduction sums; the int32 input variant has no NEON
// implementation.
void ReductionSumVector(const float* input_vector, float* output_vector,
                        int output_size, int reduction_size) {
  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,
                   reduction_size);
}

void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,
                        int output_size, int reduction_size) {
  PortableReductionSumVector(input_vector, output_vector, output_size,
                             reduction_size);
}

void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                        int output_size, int reduction_size) {
  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,
                   reduction_size);
}

void MeanStddevNormalization(const float* __restrict__ input_vector,
                             float* __restrict__ output_vector, int v_size,
                             int n_batch) {
  NEON_OR_PORTABLE(MeanStddevNormalization, input_vector, output_vector, v_size,
                   n_batch);
}

// No NEON implementation: always uses the portable reference.
void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
                          const int8_t* recurrent, int8_t recurrent_zp,
                          int32_t input_effective_scale_a,
                          int32_t input_effective_scale_b,
                          int32_t recurrent_effective_scale_a,
                          int32_t recurrent_effective_scale_b, int32_t n_batch,
                          int32_t n_cell, int16_t* output) {
  PortableTwoGateSaturatingAdd(
      input, input_zp, recurrent, recurrent_zp, input_effective_scale_a,
      input_effective_scale_b, recurrent_effective_scale_a,
      recurrent_effective_scale_b, n_batch, n_cell, output);
}

}  // namespace tensor_utils
}  // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils.h | C++ | apache-2.0 | 15,141 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_IMPL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_IMPL_H_
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif
namespace tflite {
namespace tensor_utils {
#ifdef USE_NEON

// Declarations of the NEON implementations dispatched to by
// neon_tensor_utils.h via NEON_OR_PORTABLE. See the Portable* counterparts
// in portable_tensor_utils_impl.h for the reference semantics each of these
// must match.

// Multiply a matrix by a batch vector, and store results in a batch-size
// vector.
void NeonMatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                             int m_cols, const float* vector,
                                             int n_batch, float* result);

// Matrix multiplication for quantized values using symmetric quantization.
void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                             const int m_rows, const int m_cols,
                                             const int8_t* __restrict__ vectors,
                                             const float* scaling_factors,
                                             int n_batch,
                                             float* __restrict__ result);

// Same as above but with a scratch buffer and CpuBackendContext for the
// int8 x int8 -> int32 accumulation computation
void NeonMatrixBatchVectorMultiplyAccumulate(const int8_t* __restrict__ matrix,
                                             const int m_rows, const int m_cols,
                                             const int8_t* __restrict__ vectors,
                                             const float* scaling_factors,
                                             int n_batch, int32_t* scratch,
                                             float* __restrict__ result,
                                             CpuBackendContext* context);

// Matrix multiplication for quantized values using asymmetric quantization.
void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context);

// Quantized (int16) layer normalization, per batch.
void NeonApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights,
                        const int32_t* bias, int32_t layer_norm_scale_a,
                        int32_t layer_norm_scale_b, int32_t variance_limit,
                        int n_batch, int n_input, int16_t* output);

// Quantized (int16) sigmoid activation.
void NeonApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
                      int16_t* output);

// Quantized (int16) tanh activation; `integer_bits` selects the input's
// fixed-point format.
void NeonApplyTanh(int32_t integer_bits, const int16_t* input, int32_t n_batch,
                   int32_t n_input, int16_t* output);

// Element-wise multiply of two int16 batch vectors (int16 or rescaled int8
// output).
void NeonCwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch,
                  int n_input, int shift, int16_t* output);

void NeonCwiseMul(const int16_t* input_1, const int16_t* input_2,
                  int32_t multiplier, int shift, int n_batch, int n_input,
                  int32_t output_zp, int8_t* output);

// Element-wise add of two int16 batch vectors.
void NeonCwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch,
                  int n_input, int16_t* output);

// In-place clipping of a vector to +/- clipping_value (overloads per type).
void NeonCwiseClipping(float* vector, const int v_size,
                       const float clipping_value);
void NeonCwiseClipping(int16_t* vector, const int v_size,
                       const int16_t clipping_value);
void NeonCwiseClipping(int8_t* vector, const int v_size,
                       const int8_t clipping_value);

// Fully-quantized matrix-vector products (int8 input, int8 or int16 output).
void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context);

void NeonMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context);

// Accumulates matrix[i][j] * scalar into output (n_row x n_col).
void NeonMatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
                                        int32_t n_row, int32_t n_col,
                                        int32_t* output);

// Sparse float matrix-vector product, 1x4 block format (CSR-style
// segments/indices).
void NeonSparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result);

// Multiply a matrix by a batch vector, and store results in a batch-size
// vector. Sparse version.
void NeonSparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result);

// Matrix multiplication for quantized values using symmetric quantization.
// Sparse version.
void NeonSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result);

// Dot product of two vectors.
float NeonVectorVectorDotProduct(const float* vector1, const float* vector2,
                                 int v_size);

// Compute "1.0f - elements of vector" (used in CIFG).
void NeonSub1Vector(const float* vector, int v_size, float* result);

void NeonSub1Vector(const int16_t* vector, int v_size, int16_t* result);

// Multiply all elements of vector with a scalar.
void NeonVectorScalarMultiply(const int8_t* vector, int v_size, float scale,
                              float* result);

// Check if all entries of a vector are zero.
bool NeonIsZeroVector(const float* vector, int v_size);

// Check if all entries of a vector are zero.
bool NeonIsZeroVector(const int8_t* vector, int v_size);

// Symmetric quantizer.
void NeonSymmetricQuantizeFloats(const float* values, const int size,
                                 int8_t* quantized_values, float* min,
                                 float* max, float* scaling_factor);

// Symmetric quantizer.
void NeonSymmetricQuantizeFloats(const float* values, const int size,
                                 int8_t* quantized_values, float min, float max,
                                 float* scaling_factor);

// Asymmetric quantizer.
void NeonAsymmetricQuantizeFloats(const float* values, const int size,
                                  int8_t* quantized_values,
                                  float* scaling_factor, int32_t* offset);

// Reduce-sum on a float input vector:
// input_vector: float pointer to input vector.
// output_vector: float pointer to vector.
// output_size: output vector size.
// reduction_size: number of consecutive elements from input vector which are
// added to get one element of output.
void NeonReductionSumVector(const float* input_vector, float* output_vector,
                            int output_size, int reduction_size);

// Same reduction for int8 inputs accumulated into int32 outputs.
void NeonReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                            int output_size, int reduction_size);

// Element-wise product of `vector` with each batch vector, rescaled by
// (multiplier, shift) and accumulated into `result` with int16 saturation.
void NeonVectorBatchVectorCwiseProductAccumulate(
    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
    int32_t multiplier, int shift, int16_t* result);

// Layer norm for each batch.
void NeonMeanStddevNormalization(const float* __restrict__ input_vector,
                                 float* __restrict__ output_vector, int v_size,
                                 int n_batch);

#endif  // USE_NEON
}  // namespace tensor_utils
}  // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_NEON_TENSOR_UTILS_IMPL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/neon_tensor_utils_impl.h | C++ | apache-2.0 | 8,783 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_OPTIMIZED_OPS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_OPTIMIZED_OPS_H_
#include <assert.h>
#include <stdint.h>
#include <sys/types.h>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <limits>
#include <memory>
#include <tuple>
#include <type_traits>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/reference/add.h"
#include "tensorflow/lite/kernels/internal/reference/resize_nearest_neighbor.h"
#if defined(TF_LITE_USE_CBLAS) && defined(__APPLE__)
#include <Accelerate/Accelerate.h>
#endif
#include "third_party/eigen3/Eigen/Core"
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/optimized/cpu_check.h"
#include "tensorflow/lite/kernels/internal/optimized/im2col_utils.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/strided_slice_logic.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/transpose_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
#if __aarch64__ && __clang__
#define TFLITE_SOFTMAX_USE_UINT16_LUT
#endif
namespace tflite {
namespace optimized_ops {
// Unoptimized reference ops:
using reference_ops::Broadcast4DSlowGreater;
using reference_ops::Broadcast4DSlowGreaterEqual;
using reference_ops::Broadcast4DSlowGreaterEqualWithScaling;
using reference_ops::Broadcast4DSlowGreaterWithScaling;
using reference_ops::Broadcast4DSlowLess;
using reference_ops::Broadcast4DSlowLessEqual;
using reference_ops::Broadcast4DSlowLessEqualWithScaling;
using reference_ops::Broadcast4DSlowLessWithScaling;
using reference_ops::BroadcastAdd4DSlow;
using reference_ops::BroadcastMul4DSlow;
using reference_ops::BroadcastSub16POTSlow;
using reference_ops::BroadcastSubSlow;
using reference_ops::Concatenation;
using reference_ops::ConcatenationWithScaling;
using reference_ops::DepthConcatenation;
using reference_ops::Div;
using reference_ops::Elu;
using reference_ops::FakeQuant;
using reference_ops::Fill;
using reference_ops::Gather;
using reference_ops::Greater;
using reference_ops::GreaterEqual;
using reference_ops::GreaterEqualWithScaling;
using reference_ops::GreaterWithScaling;
using reference_ops::LeakyRelu;
using reference_ops::Less;
using reference_ops::LessEqual;
using reference_ops::LessEqualWithScaling;
using reference_ops::LessWithScaling;
using reference_ops::Mean;
using reference_ops::ProcessBroadcastShapes;
using reference_ops::RankOneSelect;
using reference_ops::Relu1;
using reference_ops::Relu6;
using reference_ops::ReluX;
using reference_ops::Round;
using reference_ops::Select;
using reference_ops::SpaceToBatchND;
using reference_ops::Split;
using reference_ops::Sub16;
// TODO(b/80247582) Remove this constant.
// This will be phased out as the shifts are revised with more thought. Use of a
// constant enables us to track progress on this work.
//
// Used to convert from old-style shifts (right) to new-style (left).
// Multiplying a legacy right-shift amount by kReverseShift (-1) yields the
// equivalent left-shift exponent expected by the newer kernel signatures.
static constexpr int kReverseShift = -1;
// Make a local VectorMap typedef allowing to map a float array
// as a Eigen vector expression. The std::conditional here is to
// construct the suitable Eigen type for the constness of the
// data. Indeed, for const data, we need to produce
// Eigen::Map<const Eigen::Matrix<float, ...>>
// and not the more straightforward
// Eigen::Map<Eigen::Matrix<const float, ...>>
template <typename Scalar>
using VectorMap = typename std::conditional<
    std::is_const<Scalar>::value,
    Eigen::Map<const Eigen::Matrix<typename std::remove_const<Scalar>::type,
                                   Eigen::Dynamic, 1>>,
    Eigen::Map<Eigen::Matrix<Scalar, Eigen::Dynamic, 1>>>::type;
// Views the whole flattened buffer as a column vector of length
// shape.FlatSize(). No copy is made; the map aliases `data`.
template <typename Scalar>
VectorMap<Scalar> MapAsVector(Scalar* data, const RuntimeShape& shape) {
  const int size = shape.FlatSize();
  return VectorMap<Scalar>(data, size, 1);
}
// Make a local VectorMap typedef allowing to map a float array
// as a Eigen matrix expression. The same explanation as for VectorMap
// above also applies here.
template <typename Scalar>
using MatrixMap = typename std::conditional<
    std::is_const<Scalar>::value,
    Eigen::Map<const Eigen::Matrix<typename std::remove_const<Scalar>::type,
                                   Eigen::Dynamic, Eigen::Dynamic>>,
    Eigen::Map<Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic>>>::type;
// Views the buffer as a matrix whose row count is the innermost (last)
// dimension of `shape` and whose column count is the product of all the
// remaining dimensions. With Eigen's default column-major storage this makes
// each matrix column one contiguous innermost slice of the tensor.
template <typename Scalar>
MatrixMap<Scalar> MapAsMatrixWithLastDimAsRows(Scalar* data,
                                               const RuntimeShape& shape) {
  const int dims_count = shape.DimensionsCount();
  const int rows = shape.Dims(dims_count - 1);
  const int cols = FlatSizeSkipDim(shape, dims_count - 1);
  return MatrixMap<Scalar>(data, rows, cols);
}
// Views `data` as a column-major matrix whose column count equals the
// leading dimension of `shape`; every remaining dimension is collapsed into
// the row count. No copy is made; the map aliases `data`.
template <typename Scalar>
MatrixMap<Scalar> MapAsMatrixWithFirstDimAsCols(Scalar* data,
                                                const RuntimeShape& shape) {
  const int num_cols = shape.Dims(0);
  const int num_rows = FlatSizeSkipDim(shape, 0);
  return MatrixMap<Scalar>(data, num_rows, num_cols);
}
template <typename Scalar>
using ArrayMap = typename std::conditional<
    std::is_const<Scalar>::value,
    Eigen::Map<const Eigen::Array<typename std::remove_const<Scalar>::type,
                                  Eigen::Dynamic, Eigen::Dynamic>>,
    Eigen::Map<Eigen::Array<Scalar, Eigen::Dynamic, Eigen::Dynamic>>>::type;
// Array-expression analogue of MapAsMatrixWithLastDimAsRows: rows come from
// the innermost dimension, columns from the product of all other dimensions.
// Eigen Arrays provide element-wise (not linear-algebra) operator semantics.
template <typename Scalar>
ArrayMap<Scalar> MapAsArrayWithLastDimAsRows(Scalar* data,
                                             const RuntimeShape& shape) {
  const int dims_count = shape.DimensionsCount();
  const int rows = shape.Dims(dims_count - 1);
  const int cols = FlatSizeSkipDim(shape, dims_count - 1);
  return ArrayMap<Scalar>(data, rows, cols);
}
// Copied from tensorflow/core/framework/tensor_types.h
// Minimal subset of tensorflow::core's TTypes helper providing the Eigen
// TensorMap aliases used by the kernels below.
template <typename T, int NDIMS = 1, typename IndexType = Eigen::DenseIndex>
struct TTypes {
  // Rank-1 tensor (vector) of scalar type T.
  typedef Eigen::TensorMap<Eigen::Tensor<T, 1, Eigen::RowMajor, IndexType>,
                           Eigen::Aligned>
      Flat;
  // Rank-2 read-only tensor view without the alignment requirement.
  typedef Eigen::TensorMap<
      Eigen::Tensor<const T, 2, Eigen::RowMajor, IndexType>>
      UnalignedConstMatrix;
};
// TODO(b/62193649): this function is only needed as long
// as we have the --variable_batch hack.
// Maps `data` as a column-major matrix with a caller-chosen row count; the
// column count is inferred from the flat size, which must be an exact
// multiple of `rows`. Exists solely to support the legacy --variable_batch
// behavior (see b/62193649 above).
template <typename Scalar>
MatrixMap<Scalar> MapAsMatrixWithGivenNumberOfRows(Scalar* data,
                                                   const RuntimeShape& shape,
                                                   int rows) {
  const int total_size = shape.FlatSize();
  TFLITE_DCHECK_EQ(total_size % rows, 0);
  return MatrixMap<Scalar>(data, rows, total_size / rows);
}
// Applies a binary op over two inputs whose shapes have been collapsed into
// the "fivefold" broadcast pattern stored in params.broadcast_shape
// (precomputed elsewhere, e.g. by ProcessBroadcastShapes -- TODO confirm with
// callers). `elementwise_f` consumes contiguous runs of y4 elements;
// `scalar_broadcast_f` handles the degenerate y4 == 1 case, broadcasting a
// single input1 value against a run of y3 input2 elements.
template <typename ElementwiseF, typename ScalarBroadcastF, typename T>
inline void BinaryBroadcastFiveFold(const ArithmeticParams& unswitched_params,
                                    const RuntimeShape& unswitched_input1_shape,
                                    const T* unswitched_input1_data,
                                    const RuntimeShape& unswitched_input2_shape,
                                    const T* unswitched_input2_data,
                                    const RuntimeShape& output_shape,
                                    T* output_data, ElementwiseF elementwise_f,
                                    ScalarBroadcastF scalar_broadcast_f) {
  // Build a params copy with the per-input quantization fields exchanged so
  // the two inputs can be swapped when input2 (not input1) is the
  // fast-broadcasting side.
  ArithmeticParams switched_params = unswitched_params;
  switched_params.input1_offset = unswitched_params.input2_offset;
  switched_params.input1_multiplier = unswitched_params.input2_multiplier;
  switched_params.input1_shift = unswitched_params.input2_shift;
  switched_params.input2_offset = unswitched_params.input1_offset;
  switched_params.input2_multiplier = unswitched_params.input1_multiplier;
  switched_params.input2_shift = unswitched_params.input1_shift;
  const bool use_unswitched =
      unswitched_params.broadcast_category ==
      tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
  const ArithmeticParams& params =
      use_unswitched ? unswitched_params : switched_params;
  const T* input1_data =
      use_unswitched ? unswitched_input1_data : unswitched_input2_data;
  const T* input2_data =
      use_unswitched ? unswitched_input2_data : unswitched_input1_data;
  // Fivefold nested loops. The second input resets its position for each
  // iteration of the second loop. The first input resets its position at the
  // beginning of the fourth loop. The innermost loop is an elementwise add of
  // sections of the arrays.
  T* output_data_ptr = output_data;
  const T* input1_data_ptr = input1_data;
  const T* input2_data_reset = input2_data;
  // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
  // between input shapes. y3 for input 1 is always broadcast, and so the
  // dimension there is 1, whereas optionally y1 might be broadcast for
  // input 2. Put another way, input1.shape.FlatSize = y0 * y1 * y2 * y4,
  // input2.shape.FlatSize = y0 * y2 * y3 * y4.
  int y0 = params.broadcast_shape[0];
  int y1 = params.broadcast_shape[1];
  int y2 = params.broadcast_shape[2];
  int y3 = params.broadcast_shape[3];
  int y4 = params.broadcast_shape[4];
  if (y4 > 1) {
    // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
    // dimension.
    for (int i0 = 0; i0 < y0; ++i0) {
      const T* input2_data_ptr = nullptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          for (int i3 = 0; i3 < y3; ++i3) {
            elementwise_f(y4, params, input1_data_ptr, input2_data_ptr,
                          output_data_ptr);
            input2_data_ptr += y4;
            output_data_ptr += y4;
          }
          // We have broadcast y4 of input1 data y3 times, and now move on.
          input1_data_ptr += y4;
        }
      }
      // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
      input2_data_reset = input2_data_ptr;
    }
  } else {
    // Special case of y4 == 1, in which the innermost loop is a single
    // element and can be combined with the next (y3) as an inner broadcast.
    //
    // Note that this handles the case of pure scalar broadcast when
    // y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
    // broadcast with batch (as y2 > 1).
    //
    // NOTE The process is the same as the above general case except
    // simplified for y4 == 1 and the loop over y3 is contained within the
    // AddScalarBroadcast function.
    for (int i0 = 0; i0 < y0; ++i0) {
      const T* input2_data_ptr = nullptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          scalar_broadcast_f(y3, params, *input1_data_ptr, input2_data_ptr,
                             output_data_ptr);
          input2_data_ptr += y3;
          output_data_ptr += y3;
          input1_data_ptr += 1;
        }
      }
      input2_data_reset = input2_data_ptr;
    }
  }
}
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
// Looks up each element of <indices> in <table>, returns them in a vector.
//
// `table` is a 256-entry byte LUT stored as four 64-byte quarters. vqtbl4q_u8
// yields 0 for any index >= 64, so each quarter is probed with the indices
// XOR'ed by that quarter's base (0x40, 0x80, 0xc0): indices belonging to the
// quarter land in [0, 63], all others fall out of range and contribute 0.
// ORing the four partial results therefore merges the lookups losslessly.
inline uint8x16_t aarch64_lookup_vector(const uint8x16x4_t table[4],
                                        uint8x16_t indices) {
  // Look up in 1st quarter of the table: top 2 bits of indices == 00
  uint8x16_t output1 = vqtbl4q_u8(table[0], indices);
  // Look up in 2nd quarter of the table: top 2 bits of indices == 01
  uint8x16_t output2 =
      vqtbl4q_u8(table[1], veorq_u8(indices, vdupq_n_u8(0x40)));
  // Look up in 3rd quarter of the table: top 2 bits of indices == 10
  uint8x16_t output3 =
      vqtbl4q_u8(table[2], veorq_u8(indices, vdupq_n_u8(0x80)));
  // Look up in 4th quarter of the table: top 2 bits of indices == 11
  uint8x16_t output4 =
      vqtbl4q_u8(table[3], veorq_u8(indices, vdupq_n_u8(0xc0)));
  // Combine result of the 4 lookups.
  return vorrq_u8(vorrq_u8(output1, output2), vorrq_u8(output3, output4));
}
#endif
// Adds `bias_data` to `array_data` in place (the bias repeating every
// bias_shape.FlatSize() elements) and then clamps each element to
// [output_activation_min, output_activation_max].
inline void AddBiasAndEvalActivationFunction(float output_activation_min,
                                             float output_activation_max,
                                             const RuntimeShape& bias_shape,
                                             const float* bias_data,
                                             const RuntimeShape& array_shape,
                                             float* array_data) {
  const int bias_size = bias_shape.FlatSize();
  const int array_size = array_shape.FlatSize();
  BiasAndClamp(output_activation_min, output_activation_max, bias_size,
               bias_data, array_size, array_data);
}
inline void FullyConnected(
const FullyConnectedParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& weights_shape,
const float* weights_data, const RuntimeShape& bias_shape,
const float* optional_bias_data, const RuntimeShape& output_shape,
float* output_data, CpuBackendContext* cpu_backend_context) {
ruy::profiler::ScopeLabel label("FullyConnected");
const int dims_count = weights_shape.DimensionsCount();
const int input_rows = weights_shape.Dims(dims_count - 1);
cpu_backend_gemm::MatrixParams<float> rhs_params;
rhs_params.order = cpu_backend_gemm::Order::kColMajor;
rhs_params.rows = input_rows;
rhs_params.cols = input_shape.FlatSize() / input_rows;
rhs_params.cache_policy =
cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable);
TFLITE_DCHECK_EQ(input_shape.FlatSize(), rhs_params.rows * rhs_params.cols);
cpu_backend_gemm::MatrixParams<float> lhs_params;
lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
lhs_params.cols = weights_shape.Dims(dims_count - 1);
lhs_params.rows = FlatSizeSkipDim(weights_shape, dims_count - 1);
lhs_params.cache_policy =
cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable);
cpu_backend_gemm::MatrixParams<float> dst_params;
dst_params.order = cpu_backend_gemm::Order::kColMajor;
dst_params.rows = output_shape.Dims(output_shape.DimensionsCount() - 1);
dst_params.cols =
FlatSizeSkipDim(output_shape, output_shape.DimensionsCount() - 1);
cpu_backend_gemm::GemmParams<float, float> gemm_params;
gemm_params.bias = optional_bias_data;
gemm_params.clamp_min = params.float_activation_min;
gemm_params.clamp_max = params.float_activation_max;
cpu_backend_gemm::Gemm(lhs_params, weights_data, rhs_params, input_data,
dst_params, output_data, gemm_params,
cpu_backend_context);
}
// Quantized (uint8 in / uint8 out) FullyConnected implemented as a single
// GEMM with fixed-point requantization of the int32 accumulators via
// (output_multiplier, output_shift), followed by clamping to the quantized
// activation range.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("FullyConnected/8bit");
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int filter_rows = filter_shape.Dims(filter_dim_count - 2);
  const int filter_cols = filter_shape.Dims(filter_dim_count - 1);
  TFLITE_DCHECK_EQ(filter_shape.FlatSize(), filter_rows * filter_cols);
  const int output_rows = output_shape.Dims(output_dim_count - 1);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  }
  // Filter is the LHS: one row per output channel. Note the negation: the
  // params convention stores offsets that get *added*, while
  // MatrixParams::zero_point is *subtracted* by the GEMM (see
  // cpu_backend_gemm_params.h).
  cpu_backend_gemm::MatrixParams<uint8> lhs_params;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.zero_point = -filter_offset;
  lhs_params.cache_policy =
      cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable);
  // Input activations are the RHS: one column per batch element.
  cpu_backend_gemm::MatrixParams<uint8> rhs_params;
  rhs_params.rows = filter_cols;
  rhs_params.cols = batches;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.zero_point = -input_offset;
  rhs_params.cache_policy =
      cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable);
  cpu_backend_gemm::MatrixParams<uint8> dst_params;
  dst_params.rows = filter_rows;
  dst_params.cols = batches;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.zero_point = output_offset;
  // Bias addition, fixed-point requantization and activation clamping are
  // all fused into the GEMM epilogue.
  cpu_backend_gemm::GemmParams<int32, uint8> gemm_params;
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  gemm_params.multiplier_fixedpoint = output_multiplier;
  gemm_params.multiplier_exponent = output_shift;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
}
// Quantized FullyConnected variant with uint8 inputs/filter but int16
// outputs (used e.g. for higher-precision activations). Requires a zero
// output offset (symmetric int16 output), enforced by the DCHECK below.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data_int32, const RuntimeShape& output_shape,
    int16* output_data, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("FullyConnected/Uint8Int16");
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  // int16 output path only supports a zero output zero-point.
  TFLITE_DCHECK_EQ(output_offset, 0);
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  // Filter is the LHS; offsets are negated because MatrixParams::zero_point
  // is subtracted by the GEMM while the params convention adds offsets (see
  // cpu_backend_gemm_params.h).
  cpu_backend_gemm::MatrixParams<uint8> lhs_params;
  lhs_params.rows = output_depth;
  lhs_params.cols = accum_depth;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.zero_point = -filter_offset;
  lhs_params.cache_policy =
      cpu_backend_gemm::DefaultCachePolicy(params.lhs_cacheable);
  cpu_backend_gemm::MatrixParams<uint8> rhs_params;
  rhs_params.rows = accum_depth;
  rhs_params.cols = batches;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.zero_point = -input_offset;
  rhs_params.cache_policy =
      cpu_backend_gemm::DefaultCachePolicy(params.rhs_cacheable);
  cpu_backend_gemm::MatrixParams<int16> dst_params;
  dst_params.rows = output_depth;
  dst_params.cols = batches;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.zero_point = 0;
  // Bias, fixed-point requantization and clamping are fused into the GEMM.
  cpu_backend_gemm::GemmParams<int32, int16> gemm_params;
  gemm_params.bias = bias_data_int32;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  gemm_params.multiplier_fixedpoint = output_multiplier;
  gemm_params.multiplier_exponent = output_shift;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
}
// Internal function doing the actual arithmetic work for
// ShuffledFullyConnected.
// May be called either directly by it (single-threaded case) or may be used
// as the 'task' for worker threads to run (multi-threaded case, see
// ShuffledFullyConnectedWorkerTask below).
inline void ShuffledFullyConnectedWorkerImpl(
    const uint8* shuffled_input_workspace_data,
    const int8* shuffled_weights_data, int batches, int output_depth,
    int output_stride, int accum_depth, const int32* bias_data,
    int32 output_multiplier, int output_shift, int16* output_data) {
  // Only batches == 1 and batches == 4 layouts are supported; any other value
  // hits the TFLITE_DCHECK(false) fallthrough below.
#if defined USE_NEON
  const int8* shuffled_weights_ptr = shuffled_weights_data;
  if (batches == 1) {
    // Split the signed output_shift into a plain left shift (positive case)
    // and a rounding right shift (negative case).
    const int right_shift = output_shift > 0 ? 0 : -output_shift;
    const int left_shift = output_shift > 0 ? output_shift : 0;
    // Weights are pre-shuffled in groups of 4 output channels x 16 depth.
    for (int c = 0; c < output_depth; c += 4) {
      // Accumulation loop.
      int32x4_t row_accum0 = vdupq_n_s32(0);
      int32x4_t row_accum1 = vdupq_n_s32(0);
      int32x4_t row_accum2 = vdupq_n_s32(0);
      int32x4_t row_accum3 = vdupq_n_s32(0);
      for (int d = 0; d < accum_depth; d += 16) {
        int8x16_t weights0 = vld1q_s8(shuffled_weights_ptr + 0);
        int8x16_t weights1 = vld1q_s8(shuffled_weights_ptr + 16);
        int8x16_t weights2 = vld1q_s8(shuffled_weights_ptr + 32);
        int8x16_t weights3 = vld1q_s8(shuffled_weights_ptr + 48);
        shuffled_weights_ptr += 64;
        int8x16_t input =
            vreinterpretq_s8_u8(vld1q_u8(shuffled_input_workspace_data + d));
        int16x8_t local_accum0 =
            vmull_s8(vget_low_s8(weights0), vget_low_s8(input));
        int16x8_t local_accum1 =
            vmull_s8(vget_low_s8(weights1), vget_low_s8(input));
        int16x8_t local_accum2 =
            vmull_s8(vget_low_s8(weights2), vget_low_s8(input));
        int16x8_t local_accum3 =
            vmull_s8(vget_low_s8(weights3), vget_low_s8(input));
        local_accum0 =
            vmlal_s8(local_accum0, vget_high_s8(weights0), vget_high_s8(input));
        local_accum1 =
            vmlal_s8(local_accum1, vget_high_s8(weights1), vget_high_s8(input));
        local_accum2 =
            vmlal_s8(local_accum2, vget_high_s8(weights2), vget_high_s8(input));
        local_accum3 =
            vmlal_s8(local_accum3, vget_high_s8(weights3), vget_high_s8(input));
        row_accum0 = vpadalq_s16(row_accum0, local_accum0);
        row_accum1 = vpadalq_s16(row_accum1, local_accum1);
        row_accum2 = vpadalq_s16(row_accum2, local_accum2);
        row_accum3 = vpadalq_s16(row_accum3, local_accum3);
      }
      // Horizontally reduce accumulators
      int32x2_t pairwise_reduced_acc_0, pairwise_reduced_acc_1,
          pairwise_reduced_acc_2, pairwise_reduced_acc_3;
      pairwise_reduced_acc_0 =
          vpadd_s32(vget_low_s32(row_accum0), vget_high_s32(row_accum0));
      pairwise_reduced_acc_1 =
          vpadd_s32(vget_low_s32(row_accum1), vget_high_s32(row_accum1));
      pairwise_reduced_acc_2 =
          vpadd_s32(vget_low_s32(row_accum2), vget_high_s32(row_accum2));
      pairwise_reduced_acc_3 =
          vpadd_s32(vget_low_s32(row_accum3), vget_high_s32(row_accum3));
      const int32x2_t reduced_lo =
          vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);
      const int32x2_t reduced_hi =
          vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);
      int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);
      // Add bias values.
      int32x4_t bias_vec = vld1q_s32(bias_data + c);
      reduced = vaddq_s32(reduced, bias_vec);
      reduced = vshlq_s32(reduced, vdupq_n_s32(left_shift));
      // Multiply by the fixed-point multiplier.
      reduced = vqrdmulhq_n_s32(reduced, output_multiplier);
      // Rounding-shift-right.
      using gemmlowp::RoundingDivideByPOT;
      reduced = RoundingDivideByPOT(reduced, right_shift);
      // Narrow values down to 16 bit signed.
      const int16x4_t res16 = vqmovn_s32(reduced);
      vst1_s16(output_data + c, res16);
    }
  } else if (batches == 4) {
    const int right_shift = output_shift > 0 ? 0 : -output_shift;
    const int left_shift = output_shift > 0 ? output_shift : 0;
    // 4-batch layout: inputs are pre-shuffled in groups of 4 batches x 16
    // depth, so each iteration consumes a 64-byte input block alongside the
    // 64-byte weights block, keeping 16 int32x4 accumulators live.
    for (int c = 0; c < output_depth; c += 4) {
      const int8* shuffled_input_ptr =
          reinterpret_cast<const int8*>(shuffled_input_workspace_data);
      // Accumulation loop.
      int32x4_t row_accum00 = vdupq_n_s32(0);
      int32x4_t row_accum10 = vdupq_n_s32(0);
      int32x4_t row_accum20 = vdupq_n_s32(0);
      int32x4_t row_accum30 = vdupq_n_s32(0);
      int32x4_t row_accum01 = vdupq_n_s32(0);
      int32x4_t row_accum11 = vdupq_n_s32(0);
      int32x4_t row_accum21 = vdupq_n_s32(0);
      int32x4_t row_accum31 = vdupq_n_s32(0);
      int32x4_t row_accum02 = vdupq_n_s32(0);
      int32x4_t row_accum12 = vdupq_n_s32(0);
      int32x4_t row_accum22 = vdupq_n_s32(0);
      int32x4_t row_accum32 = vdupq_n_s32(0);
      int32x4_t row_accum03 = vdupq_n_s32(0);
      int32x4_t row_accum13 = vdupq_n_s32(0);
      int32x4_t row_accum23 = vdupq_n_s32(0);
      int32x4_t row_accum33 = vdupq_n_s32(0);
      for (int d = 0; d < accum_depth; d += 16) {
        int8x16_t weights0 = vld1q_s8(shuffled_weights_ptr + 0);
        int8x16_t weights1 = vld1q_s8(shuffled_weights_ptr + 16);
        int8x16_t weights2 = vld1q_s8(shuffled_weights_ptr + 32);
        int8x16_t weights3 = vld1q_s8(shuffled_weights_ptr + 48);
        shuffled_weights_ptr += 64;
        int8x16_t input0 = vld1q_s8(shuffled_input_ptr + 0);
        int8x16_t input1 = vld1q_s8(shuffled_input_ptr + 16);
        int8x16_t input2 = vld1q_s8(shuffled_input_ptr + 32);
        int8x16_t input3 = vld1q_s8(shuffled_input_ptr + 48);
        shuffled_input_ptr += 64;
        int16x8_t local_accum0, local_accum1, local_accum2, local_accum3;
// Multiply-accumulates batch B's input block against the 4 weight rows and
// pairwise-adds the int16 partials into the int32 row accumulators.
#define TFLITE_SHUFFLED_FC_ACCUM(B)                                           \
  local_accum0 = vmull_s8(vget_low_s8(weights0), vget_low_s8(input##B));      \
  local_accum1 = vmull_s8(vget_low_s8(weights1), vget_low_s8(input##B));      \
  local_accum2 = vmull_s8(vget_low_s8(weights2), vget_low_s8(input##B));      \
  local_accum3 = vmull_s8(vget_low_s8(weights3), vget_low_s8(input##B));      \
  local_accum0 =                                                              \
      vmlal_s8(local_accum0, vget_high_s8(weights0), vget_high_s8(input##B)); \
  local_accum1 =                                                              \
      vmlal_s8(local_accum1, vget_high_s8(weights1), vget_high_s8(input##B)); \
  local_accum2 =                                                              \
      vmlal_s8(local_accum2, vget_high_s8(weights2), vget_high_s8(input##B)); \
  local_accum3 =                                                              \
      vmlal_s8(local_accum3, vget_high_s8(weights3), vget_high_s8(input##B)); \
  row_accum0##B = vpadalq_s16(row_accum0##B, local_accum0);                   \
  row_accum1##B = vpadalq_s16(row_accum1##B, local_accum1);                   \
  row_accum2##B = vpadalq_s16(row_accum2##B, local_accum2);                   \
  row_accum3##B = vpadalq_s16(row_accum3##B, local_accum3);
        TFLITE_SHUFFLED_FC_ACCUM(0)
        TFLITE_SHUFFLED_FC_ACCUM(1)
        TFLITE_SHUFFLED_FC_ACCUM(2)
        TFLITE_SHUFFLED_FC_ACCUM(3)
#undef TFLITE_SHUFFLED_FC_ACCUM
      }
      // Horizontally reduce accumulators
// Reduces batch B's four row accumulators to one int32x4, then applies bias,
// fixed-point requantization and narrowing, storing to batch B's output row.
#define TFLITE_SHUFFLED_FC_STORE(B)                                           \
  {                                                                           \
    int32x2_t pairwise_reduced_acc_0, pairwise_reduced_acc_1,                 \
        pairwise_reduced_acc_2, pairwise_reduced_acc_3;                       \
    pairwise_reduced_acc_0 =                                                  \
        vpadd_s32(vget_low_s32(row_accum0##B), vget_high_s32(row_accum0##B)); \
    pairwise_reduced_acc_1 =                                                  \
        vpadd_s32(vget_low_s32(row_accum1##B), vget_high_s32(row_accum1##B)); \
    pairwise_reduced_acc_2 =                                                  \
        vpadd_s32(vget_low_s32(row_accum2##B), vget_high_s32(row_accum2##B)); \
    pairwise_reduced_acc_3 =                                                  \
        vpadd_s32(vget_low_s32(row_accum3##B), vget_high_s32(row_accum3##B)); \
    const int32x2_t reduced_lo =                                              \
        vpadd_s32(pairwise_reduced_acc_0, pairwise_reduced_acc_1);            \
    const int32x2_t reduced_hi =                                              \
        vpadd_s32(pairwise_reduced_acc_2, pairwise_reduced_acc_3);            \
    int32x4_t reduced = vcombine_s32(reduced_lo, reduced_hi);                 \
    int32x4_t bias_vec = vld1q_s32(bias_data + c);                            \
    reduced = vaddq_s32(reduced, bias_vec);                                   \
    reduced = vshlq_s32(reduced, vdupq_n_s32(left_shift));                    \
    reduced = vqrdmulhq_n_s32(reduced, output_multiplier);                    \
    using gemmlowp::RoundingDivideByPOT;                                      \
    reduced = RoundingDivideByPOT(reduced, right_shift);                      \
    const int16x4_t res16 = vqmovn_s32(reduced);                              \
    vst1_s16(output_data + c + B * output_stride, res16);                     \
  }
      TFLITE_SHUFFLED_FC_STORE(0);
      TFLITE_SHUFFLED_FC_STORE(1);
      TFLITE_SHUFFLED_FC_STORE(2);
      TFLITE_SHUFFLED_FC_STORE(3);
#undef TFLITE_SHUFFLED_FC_STORE
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }
#else
  // Portable scalar fallback mirroring the NEON paths above.
  if (batches == 1) {
    int16* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8 values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8* shuffled_weights_ptr =
        reinterpret_cast<const int8*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8* shuffled_input_data =
        reinterpret_cast<const int8*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      // Internal accumulation.
      // Initialize accumulator with the bias-value.
      int32 accum[4] = {0};
      // Accumulation loop.
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int j = 0; j < 16; j++) {
            int8 input_val = shuffled_input_data[d + j];
            int8 weights_val = *shuffled_weights_ptr++;
            accum[i] += weights_val * input_val;
          }
        }
      }
      for (int i = 0; i < 4; i++) {
        // Add bias value
        int acc = accum[i] + bias_data[c + i];
        // Down-scale the final int32 accumulator to the scale used by our
        // (16-bit, typically 3 integer bits) fixed-point format. The quantized
        // multiplier and shift here have been pre-computed offline
        // (e.g. by toco).
        acc =
            MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
        // Saturate, cast to int16, and store to output array.
        acc = std::max(acc, -32768);
        acc = std::min(acc, 32767);
        output_ptr[c + i] = acc;
      }
    }
  } else if (batches == 4) {
    int16* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8 values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8* shuffled_weights_ptr =
        reinterpret_cast<const int8*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8* shuffled_input_data =
        reinterpret_cast<const int8*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      const int8* shuffled_input_ptr = shuffled_input_data;
      // Accumulation loop.
      // Internal accumulation.
      // Initialize accumulator with the bias-value.
      int32 accum[4][4];
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          accum[i][b] = 0;
        }
      }
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int b = 0; b < 4; b++) {
            for (int j = 0; j < 16; j++) {
              int8 input_val = shuffled_input_ptr[16 * b + j];
              int8 weights_val = shuffled_weights_ptr[16 * i + j];
              accum[i][b] += weights_val * input_val;
            }
          }
        }
        shuffled_input_ptr += 64;
        shuffled_weights_ptr += 64;
      }
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          // Add bias value
          int acc = accum[i][b] + bias_data[c + i];
          // Down-scale the final int32 accumulator to the scale used by our
          // (16-bit, typically 3 integer bits) fixed-point format. The
          // quantized multiplier and shift here have been pre-computed offline
          // (e.g. by toco).
          acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
                                              output_shift);
          // Saturate, cast to int16, and store to output array.
          acc = std::max(acc, -32768);
          acc = std::min(acc, 32767);
          output_ptr[b * output_stride + c + i] = acc;
        }
      }
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }
#endif
}
// Wraps ShuffledFullyConnectedWorkerImpl into a Task class
// to allow using gemmlowp's threadpool.
struct ShuffledFullyConnectedWorkerTask : cpu_backend_threadpool::Task {
  // All pointers are borrowed, not owned; the caller must keep them alive
  // until Run() has completed.
  ShuffledFullyConnectedWorkerTask(const uint8* input_data,
                                   const int8* shuffled_weights_data,
                                   int batches, int output_depth,
                                   int output_stride, int accum_depth,
                                   const int32* bias_data,
                                   int32 output_multiplier, int output_shift,
                                   int16* output_data)
      : input_data_(input_data),
        shuffled_weights_data_(shuffled_weights_data),
        batches_(batches),
        output_depth_(output_depth),
        output_stride_(output_stride),
        accum_depth_(accum_depth),
        bias_data_(bias_data),
        output_multiplier_(output_multiplier),
        output_shift_(output_shift),
        output_data_(output_data) {}
  // Forwards the captured arguments to the single-threaded worker.
  void Run() override {
    ShuffledFullyConnectedWorkerImpl(
        input_data_, shuffled_weights_data_, batches_, output_depth_,
        output_stride_, accum_depth_, bias_data_, output_multiplier_,
        output_shift_, output_data_);
  }
  // Captured arguments; see ShuffledFullyConnectedWorkerImpl for semantics.
  const uint8* input_data_;
  const int8* shuffled_weights_data_;
  int batches_;
  int output_depth_;
  int output_stride_;
  int accum_depth_;
  const int32* bias_data_;
  int32 output_multiplier_;
  int output_shift_;
  int16* output_data_;
};
// Quantized fully-connected layer over "shuffled" uint8 weights with int16
// output. The weights have been pre-reordered and sign-flipped offline (see
// comment below), and the input activations are shuffled/xor'd here into
// shuffled_input_workspace_data before running the worker kernel, possibly
// split across the cpu_backend threadpool. Only batches == 1 or 4 are
// supported (enforced via TFLITE_DCHECK); accum_depth must be a multiple of
// 16 and output_depth a multiple of 4.
inline void ShuffledFullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& weights_shape,
    const uint8* shuffled_weights_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int16* output_data, uint8* shuffled_input_workspace_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("ShuffledFullyConnected/8bit");
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  // This kernel only supports the full int16 range; no narrower clamping.
  TFLITE_DCHECK_EQ(output_activation_min, -32768);
  TFLITE_DCHECK_EQ(output_activation_max, 32767);
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dim_count - 1);
  // Depth granularity requirements of the shuffled weight layout.
  TFLITE_DCHECK((accum_depth % 16) == 0);
  TFLITE_DCHECK((output_depth % 4) == 0);
  // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
  // so that just reinterpreting them as int8 values is equivalent to
  // subtracting 128 from them, thus implementing for free the subtraction of
  // the zero_point value 128.
  const int8* int8_shuffled_weights_data =
      reinterpret_cast<const int8*>(shuffled_weights_data);

  // Shuffling and xoring of input activations into the workspace buffer
  if (batches == 1) {
    // Single batch: no interleaving needed, just xor the sign bit.
#ifdef USE_NEON
    const uint8x16_t signbit = vdupq_n_u8(0x80);
    for (int i = 0; i < accum_depth; i += 16) {
      uint8x16_t val = vld1q_u8(input_data + i);
      val = veorq_u8(val, signbit);
      vst1q_u8(shuffled_input_workspace_data + i, val);
    }
#else
    for (int i = 0; i < accum_depth; i++) {
      shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
    }
#endif
  } else if (batches == 4) {
    // Four batches: interleave 16-deep slices of the 4 batches so the worker
    // kernel can consume them contiguously, xoring the sign bit as we go.
    uint8* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
    int c = 0;
#ifdef USE_NEON
    const uint8x16_t signbit = vdupq_n_u8(0x80);
    for (c = 0; c < accum_depth; c += 16) {
      const uint8* src_data_ptr = input_data + c;
      uint8x16_t val0 = vld1q_u8(src_data_ptr + 0 * accum_depth);
      uint8x16_t val1 = vld1q_u8(src_data_ptr + 1 * accum_depth);
      uint8x16_t val2 = vld1q_u8(src_data_ptr + 2 * accum_depth);
      uint8x16_t val3 = vld1q_u8(src_data_ptr + 3 * accum_depth);
      val0 = veorq_u8(val0, signbit);
      val1 = veorq_u8(val1, signbit);
      val2 = veorq_u8(val2, signbit);
      val3 = veorq_u8(val3, signbit);
      vst1q_u8(shuffled_input_workspace_ptr + 0, val0);
      vst1q_u8(shuffled_input_workspace_ptr + 16, val1);
      vst1q_u8(shuffled_input_workspace_ptr + 32, val2);
      vst1q_u8(shuffled_input_workspace_ptr + 48, val3);
      shuffled_input_workspace_ptr += 64;
    }
#else
    for (c = 0; c < accum_depth; c += 16) {
      for (int b = 0; b < 4; b++) {
        const uint8* src_data_ptr = input_data + b * accum_depth + c;
        for (int j = 0; j < 16; j++) {
          uint8 src_val = *src_data_ptr++;
          // Flip the sign bit, so that the kernel will only need to
          // reinterpret these uint8 values as int8, getting for free the
          // subtraction of the zero_point value 128.
          uint8 dst_val = src_val ^ 0x80;
          *shuffled_input_workspace_ptr++ = dst_val;
        }
      }
    }
#endif
  } else {
    // Unsupported batch count.
    TFLITE_DCHECK(false);
    return;
  }

  // Decide how many worker threads are worthwhile for this problem size.
  static constexpr int kKernelRows = 4;
  const int thread_count =
      LegacyHowManyThreads<kKernelRows>(cpu_backend_context->max_num_threads(),
                                        output_depth, batches, accum_depth);
  if (thread_count == 1) {
    // Single-thread case: do the computation on the current thread, don't
    // use a threadpool
    ShuffledFullyConnectedWorkerImpl(
        shuffled_input_workspace_data, int8_shuffled_weights_data, batches,
        output_depth, output_depth, accum_depth, bias_data, output_multiplier,
        output_shift, output_data);
    return;
  }

  // Multi-threaded case: use the gemmlowp context's threadpool.
  TFLITE_DCHECK_GT(thread_count, 1);
  std::vector<ShuffledFullyConnectedWorkerTask> tasks;
  // TODO(b/131746020) don't create new heap allocations every time.
  // At least we make it a single heap allocation by using reserve().
  tasks.reserve(thread_count);
  // Partition the output rows into kKernelRows-aligned chunks, one per task.
  const int kRowsPerWorker =
      RoundUp<kKernelRows>(CeilQuotient(output_depth, thread_count));
  int row_start = 0;
  for (int i = 0; i < thread_count; i++) {
    int row_end = std::min(output_depth, row_start + kRowsPerWorker);
    tasks.emplace_back(shuffled_input_workspace_data,
                       int8_shuffled_weights_data + row_start * accum_depth,
                       batches, row_end - row_start, output_depth, accum_depth,
                       bias_data + row_start, output_multiplier, output_shift,
                       output_data + row_start);
    row_start = row_end;
  }
  TFLITE_DCHECK_EQ(row_start, output_depth);
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
}
#ifdef USE_NEON
// Rounds each float32 lane to the nearest int32.
// On targets with vcvtnq_s32_f32 (AArch64, or SSE via arm_neon_sse.h) this is
// round-to-nearest-even. The ARMv7 fallback adds +/-0.5 and truncates, which
// rounds ties away from zero instead — NOTE(review): callers are assumed to
// tolerate this off-by-one-ULP tie difference; confirm if exactness matters.
inline int32x4_t RoundToNearest(const float32x4_t input) {
#if defined(__aarch64__) || defined(__SSSE3__)
  // Note: vcvtnq_s32_f32 is not available in ARMv7
  return vcvtnq_s32_f32(input);
#else
  static const float32x4_t zero_val_dup = vdupq_n_f32(0.0f);
  static const float32x4_t point5_val_dup = vdupq_n_f32(0.5f);
  static const float32x4_t minus_point5_val_dup = vdupq_n_f32(-0.5f);

  // Select +0.5 for non-negative lanes, -0.5 for negative lanes, then
  // truncate toward zero with vcvtq.
  const uint32x4_t mask = vcltq_f32(input, zero_val_dup);
  const float32x4_t round =
      vbslq_f32(mask, minus_point5_val_dup, point5_val_dup);
  return vcvtq_s32_f32(vaddq_f32(input, round));
#endif  // defined(__aarch64__) || defined(__SSSE3__)
}

// Unsigned variant: rounds each non-negative float32 lane to the nearest
// uint32. The non-AArch64 fallback adds 0.5 and truncates (ties away from
// zero, same caveat as above).
inline uint32x4_t RoundToNearestUnsigned(const float32x4_t input) {
#if defined(__aarch64__)
  // Note that vcvtnq_u32_f32 is not available in ARMv7 or in arm_neon_sse.h.
  return vcvtnq_u32_f32(input);
#else
  static const float32x4_t point5_val_dup = vdupq_n_f32(0.5f);

  return vcvtq_u32_f32(vaddq_f32(input, point5_val_dup));
#endif  // defined(__aarch64__)
}
#endif  // USE_NEON
// Computes a quantized (uint8) mean over the H and W axes of a 4D tensor for
// the depth slice [start_depth, end_depth), writing one value per
// (batch, depth) pair. The mean is realized as: requantize the plain integer
// sum with (multiplier, shift), then add `bias`; the caller (Mean, below)
// folds 1/(H*W) and the zero-point correction into those parameters.
// The NEON path processes 16 depth channels at a time; the scalar tail loop
// handles the remainder and the non-NEON build.
inline void MeanImpl(const tflite::MeanParams& op_params,
                     const RuntimeShape& input_shape, const uint8_t* input_data,
                     int32 multiplier, int32 shift, int32 bias,
                     const RuntimeShape& output_shape, uint8_t* output_data,
                     int start_depth, int end_depth) {
  ruy::profiler::ScopeLabel label("Mean4D/Uint8/MeanImpl");

  // Current implementation only supports dimension equals 4 and simultaneous
  // reduction over width and height.
  const int output_batch = output_shape.Dims(0);
  // Bug fix: height is dimension 1 (was Dims(2), which re-read the width
  // dimension and made the TFLITE_CHECK_EQ(output_height, 1) below redundant).
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_height, 1);
  TFLITE_CHECK_EQ(output_width, 1);

  // Output saturation range for uint8.
  constexpr int32_t kMinValue = std::numeric_limits<uint8_t>::min();
  constexpr int32_t kMaxValue = std::numeric_limits<uint8_t>::max();

#ifdef USE_NEON
  const int32x4_t bias_dup = vdupq_n_s32(bias);
  const int32x4_t min_dup = vdupq_n_s32(kMinValue);
  const int32x4_t max_dup = vdupq_n_s32(kMaxValue);
#endif  // USE_NEON

  for (int out_b = 0; out_b < output_batch; ++out_b) {
    int out_d = start_depth;
#ifdef USE_NEON
    // Vector path: accumulate 16 channels' sums in four int32x4 lanes.
    for (; out_d <= end_depth - 16; out_d += 16) {
      int32x4x4_t temp_sum;
      temp_sum.val[0] = vdupq_n_s32(0);
      temp_sum.val[1] = vdupq_n_s32(0);
      temp_sum.val[2] = vdupq_n_s32(0);
      temp_sum.val[3] = vdupq_n_s32(0);
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          const uint8_t* input_data_ptr =
              input_data + Offset(input_shape, out_b, in_h, in_w, out_d);
          uint8x16_t input_data_val = vld1q_u8(input_data_ptr);

          // Widen uint8 -> int16 -> int32 so the sums cannot overflow.
          int16x8_t input_data_low_shift =
              vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(input_data_val)));
          int16x8_t input_data_high_shift =
              vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(input_data_val)));

          int32x4_t input_low_low =
              vmovl_s16(vget_low_s16(input_data_low_shift));
          int32x4_t input_high_low =
              vmovl_s16(vget_high_s16(input_data_low_shift));
          int32x4_t input_low_high =
              vmovl_s16(vget_low_s16(input_data_high_shift));
          int32x4_t input_high_high =
              vmovl_s16(vget_high_s16(input_data_high_shift));

          temp_sum.val[0] = vaddq_s32(temp_sum.val[0], input_low_low);
          temp_sum.val[1] = vaddq_s32(temp_sum.val[1], input_high_low);
          temp_sum.val[2] = vaddq_s32(temp_sum.val[2], input_low_high);
          temp_sum.val[3] = vaddq_s32(temp_sum.val[3], input_high_high);
        }
      }

      // Requantize (sum -> mean in output scale), add bias, clamp to uint8.
      temp_sum =
          MultiplyByQuantizedMultiplier4Rows(temp_sum, multiplier, shift);

      temp_sum.val[0] = vaddq_s32(temp_sum.val[0], bias_dup);
      temp_sum.val[1] = vaddq_s32(temp_sum.val[1], bias_dup);
      temp_sum.val[2] = vaddq_s32(temp_sum.val[2], bias_dup);
      temp_sum.val[3] = vaddq_s32(temp_sum.val[3], bias_dup);

      temp_sum.val[0] = vminq_s32(vmaxq_s32(temp_sum.val[0], min_dup), max_dup);
      temp_sum.val[1] = vminq_s32(vmaxq_s32(temp_sum.val[1], min_dup), max_dup);
      temp_sum.val[2] = vminq_s32(vmaxq_s32(temp_sum.val[2], min_dup), max_dup);
      temp_sum.val[3] = vminq_s32(vmaxq_s32(temp_sum.val[3], min_dup), max_dup);

      // Narrow int32 -> uint16 -> uint8 (values are already clamped to
      // [0, 255], so the narrowing cannot wrap).
      uint16x4_t narrowed_low_low =
          vmovn_u32(vreinterpretq_u32_s32(temp_sum.val[0]));
      uint16x4_t narrowed_high_low =
          vmovn_u32(vreinterpretq_u32_s32(temp_sum.val[1]));
      uint16x4_t narrowed_low_high =
          vmovn_u32(vreinterpretq_u32_s32(temp_sum.val[2]));
      uint16x4_t narrowed_high_high =
          vmovn_u32(vreinterpretq_u32_s32(temp_sum.val[3]));

      uint16x8_t combined_low =
          vcombine_u16(narrowed_low_low, narrowed_high_low);
      uint16x8_t combined_high =
          vcombine_u16(narrowed_low_high, narrowed_high_high);

      uint8x8_t narrowed_low = vmovn_u16(combined_low);
      uint8x8_t narrowed_high = vmovn_u16(combined_high);

      uint8x16_t combined_output = vcombine_u8(narrowed_low, narrowed_high);

      uint8_t* output_data_ptr =
          output_data + Offset(output_shape, out_b, 0, 0, out_d);
      vst1q_u8(output_data_ptr, combined_output);
    }
#endif  // USE_NEON

    // Scalar path: remaining channels (and the whole range without NEON).
    for (; out_d < end_depth; ++out_d) {
      int acc = 0;
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)];
        }
      }

      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      acc += bias;
      acc = std::min(std::max(acc, kMinValue), kMaxValue);
      output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
          static_cast<uint8_t>(acc);
    }
  }
}
// Wraps MeanImpl into a cpu_backend_threadpool::Task so that Mean() can split
// the reduction across worker threads. Each task owns a depth range
// [start_height, end_height) of the output (the names come from the
// threadpool template; here they index depth — see the MeanImpl call).
// All references/pointers are borrowed and must outlive task execution.
struct MeanWorkerTask : cpu_backend_threadpool::Task {
  MeanWorkerTask(const tflite::MeanParams& op_params,
                 const RuntimeShape& input_shape, const uint8_t* input_data,
                 int32 multiplier, int32 shift, int32 bias,
                 const RuntimeShape& output_shape, uint8_t* output_data,
                 int start_height, int end_height)
      : op_params(op_params),
        input_shape(input_shape),
        input_data(input_data),
        multiplier(multiplier),
        shift(shift),
        bias(bias),
        output_shape(output_shape),
        output_data(output_data),
        start_height(start_height),
        end_height(end_height) {}
  // Runs the captured slice of the quantized mean reduction.
  void Run() override {
    MeanImpl(op_params, input_shape, input_data, multiplier, shift, bias,
             output_shape, output_data, start_height, end_height);
  }

 private:
  const tflite::MeanParams& op_params;
  const RuntimeShape& input_shape;
  const uint8_t* input_data;
  // Fixed-point requantization parameters (see Mean() for their derivation).
  int32 multiplier;
  int32 shift;
  int32 bias;
  const RuntimeShape& output_shape;
  uint8_t* output_data;
  // Depth-slice bounds handled by this task.
  int start_height;
  int end_height;
};
// Quantized (uint8) mean over the H and W axes of a 4D tensor, rescaling from
// the input quantization (input_zero_point, input_scale) to the output
// quantization (output_zero_point, output_scale). The arithmetic is folded
// into fixed-point parameters and delegated to MeanImpl, optionally split
// across the cpu_backend threadpool by output depth.
inline void Mean(const tflite::MeanParams& op_params,
                 const RuntimeShape& unextended_input_shape,
                 const uint8_t* input_data, int32 input_zero_point,
                 float input_scale, const RuntimeShape& unextended_output_shape,
                 uint8_t* output_data, int32 output_zero_point,
                 float output_scale, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("Mean4D/Uint8");

  // Current implementation only supports dimension equals 4 and simultaneous
  // reduction over width and height.
  TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_depth = output_shape.Dims(3);

  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_height, 1);
  TFLITE_CHECK_EQ(output_width, 1);

  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const float num_elements_in_axis = input_width * input_height;

  // Fold the zero-point correction into an additive bias:
  //   bias = output_zero_point - round(input_zero_point * in_scale/out_scale)
  // (round half away from zero, implemented with the +/-0.5 trick).
  float temp = input_zero_point * input_scale / output_scale;
  temp = temp > 0 ? temp + 0.5f : temp - 0.5f;
  int32_t bias = output_zero_point - static_cast<int32_t>(temp);
  // Fold 1/(H*W) and the scale ratio into one fixed-point multiplier so
  // MeanImpl can turn raw integer sums directly into output-scale means.
  float real_scale = input_scale / (num_elements_in_axis * output_scale);

  int32 multiplier, shift;
  QuantizeMultiplier(real_scale, &multiplier, &shift);

  // One thread per kMinDepthPerThread output channels, capped by the context.
  constexpr int kMinDepthPerThread = 8;
  int thread_count = output_depth / kMinDepthPerThread;
  thread_count = thread_count > 0 ? thread_count : 1;
  const int capped_thread_count =
      std::min(thread_count, cpu_backend_context->max_num_threads());

  if (capped_thread_count == 1) {
    MeanImpl(op_params, input_shape, input_data, multiplier, shift, bias,
             output_shape, output_data, 0, output_depth);
  } else {
    // Instead parallel for batch, we loop for the output_depth since batch
    // is typical 1.
    std::vector<MeanWorkerTask> tasks;
    // TODO(b/131746020) don't create new heap allocations every time.
    // At least we make it a single heap allocation by using reserve().
    tasks.reserve(capped_thread_count);
    int depth_start = 0;
    for (int i = 0; i < capped_thread_count; ++i) {
      // Try to distribute the tasks as even as possible.
      int depth_end = depth_start +
                      (output_depth - depth_start) / (capped_thread_count - i);
      tasks.emplace_back(op_params, input_shape, input_data, multiplier, shift,
                         bias, output_shape, output_data, depth_start,
                         depth_end);
      depth_start = depth_end;
    }
    cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                    cpu_backend_context);
  }
}
// Generic mean reduction over arbitrary axes: simply forwards to the
// reference implementation. Optimized cases are provided via template
// specialization (see the <float, float> specialization below).
// Returns whatever reference_ops::Mean returns (its success flag).
template <typename T, typename U>
inline bool MeanGeneral(const T* input_data, const int* input_dims,
                        const int input_num_dims, T* output_data,
                        const int* output_dims, const int output_num_dims,
                        const int* axis, const int num_axis_dimensions,
                        bool keep_dims, int* temp_index, int* resolved_axis,
                        U* temp_sum) {
  return reference_ops::Mean(input_data, input_dims, input_num_dims,
                             output_data, output_dims, output_num_dims, axis,
                             num_axis_dimensions, keep_dims, temp_index,
                             resolved_axis, temp_sum);
}
// Float specialization of MeanGeneral: when reducing over exactly the last
// dimension, computes the mean with a single Eigen column-wise sum (treating
// the input as a (last_dim x outer_size) column-major matrix); otherwise
// falls back to the reference implementation.
template <>
inline bool MeanGeneral<float, float>(
    const float* input_data, const int* input_dims, const int input_num_dims,
    float* output_data, const int* output_dims, const int output_num_dims,
    const int* axis, const int num_axis_dimensions, bool keep_dims,
    int* temp_index, int* resolved_axis, float* temp_sum) {
  // Handle reduce_mean for the last dimensions.
  if (num_axis_dimensions == 1 && axis[0] == (input_num_dims - 1)) {
    ruy::profiler::ScopeLabel label("MeanLastDim/Float");
    // Number of output elements = product of all non-reduced dimensions.
    int output_size = 1;
    for (int i = 0; i < input_num_dims - 1; ++i) {
      output_size *= input_dims[i];
    }
    const int last_input_dim = input_dims[axis[0]];

    // TODO(b/152563685): Consider use eigen to cover more general cases.
    const MatrixMap<const float> in_mat(input_data, last_input_dim,
                                        output_size);
    VectorMap<float> out(output_data, output_size, 1);
    // Sum each column (one column per output element) and divide by the
    // reduced dimension's length.
    out = (in_mat.array().colwise().sum()) / static_cast<float>(last_input_dim);
    return true;
  }
  // General case: defer to the reference implementation.
  return reference_ops::Mean(input_data, input_dims, input_num_dims,
                             output_data, output_dims, output_num_dims, axis,
                             num_axis_dimensions, keep_dims, temp_index,
                             resolved_axis, temp_sum);
}
// Float 2D convolution. Lowers the convolution to a GEMM: the input is
// expanded via im2col when strides/filter size/dilation require it, then the
// matrix product is computed either with Apple's CBLAS (when available) or
// with cpu_backend_gemm (which also fuses bias and activation clamping).
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const float* input_data, const RuntimeShape& filter_shape,
                 const float* filter_data, const RuntimeShape& bias_shape,
                 const float* bias_data, const RuntimeShape& output_shape,
                 float* output_data, const RuntimeShape& im2col_shape,
                 float* im2col_data, CpuBackendContext* cpu_backend_context) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);

  ruy::profiler::ScopeLabel label("Conv");

  // NB: the float 0.0f value is represented by all zero bytes.
  const uint8 float_zero_byte = 0x00;
  const float* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;
  // Pick the GEMM's left-hand input: an im2col expansion when needed
  // (padding is filled with zero bytes == 0.0f), or the raw input for the
  // pure 1x1/stride-1 case.
  if (need_dilated_im2col) {
    DilatedIm2col(params, float_zero_byte, input_shape, input_data,
                  filter_shape, output_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    Im2col(params, filter_height, filter_width, float_zero_byte, input_shape,
           input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }

  // GEMM problem sizes: m = number of output pixels, n = output channels,
  // k = accumulation depth (filter_h * filter_w * input_channels).
  const int gemm_input_dims = gemm_input_shape->DimensionsCount();
  int m = FlatSizeSkipDim(*gemm_input_shape, gemm_input_dims - 1);
  int n = output_shape.Dims(3);
  int k = gemm_input_shape->Dims(gemm_input_dims - 1);

#if defined(TF_LITE_USE_CBLAS) && defined(__APPLE__)
  // The following code computes matrix multiplication c = a * transpose(b)
  // with CBLAS, where:
  // * `a` is a matrix with dimensions (m, k).
  // * `b` is a matrix with dimensions (n, k), so transpose(b) is (k, n).
  // * `c` is a matrix with dimensions (m, n).
  // The naming of variables are aligned with CBLAS specification here.
  const float* a = gemm_input_data;
  const float* b = filter_data;
  float* c = output_data;
  // The stride of matrix a, b and c respectively.
  int stride_a = k;
  int stride_b = k;
  int stride_c = n;

  cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasTrans, m, n, k, 1.0f, a,
              stride_a, b, stride_b, 0.0f, c, stride_c);
  // CBLAS only did the matrix product; bias and activation are applied here.
  optimized_ops::AddBiasAndEvalActivationFunction(
      output_activation_min, output_activation_max, bias_shape, bias_data,
      output_shape, output_data);
#else
  // When an optimized CBLAS implementation is not available, fall back
  // to using cpu_backend_gemm.
  cpu_backend_gemm::MatrixParams<float> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = n;
  lhs_params.cols = k;
  cpu_backend_gemm::MatrixParams<float> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = k;
  rhs_params.cols = m;
  cpu_backend_gemm::MatrixParams<float> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = n;
  dst_params.cols = m;
  cpu_backend_gemm::GemmParams<float, float> gemm_params;
  // Bias addition and activation clamping are fused into the GEMM.
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
#endif  //  defined(TF_LITE_USE_CBLAS) && defined(__APPLE__)
}
// Hybrid convolution: int8 (symmetrically quantized, zero point 0) inputs and
// filters with float bias/output. The input is optionally im2col-expanded,
// per-batch scale factors are broadcast per GEMM row, and the product is
// accumulated in int32 (accum_scratch) then dequantized into output_data by
// MatrixBatchVectorMultiplyAccumulate; bias and activation are applied last.
inline void HybridConv(const ConvParams& params, float* scaling_factors_ptr,
                       const RuntimeShape& input_shape,
                       const int8_t* input_data,
                       const RuntimeShape& filter_shape,
                       const int8_t* filter_data,
                       const RuntimeShape& bias_shape, const float* bias_data,
                       const RuntimeShape& accum_scratch_shape,
                       int32_t* accum_scratch, const RuntimeShape& output_shape,
                       float* output_data, const RuntimeShape& im2col_shape,
                       int8_t* im2col_data, CpuBackendContext* context) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);

  const int batch_size = input_shape.Dims(0);
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);

  const int input_zero_point = 0;
  const int8_t* gemm_input_data = nullptr;
  int num_input;
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;

  // Select the GEMM input: im2col expansion when strides/filter/dilation
  // require it, else the raw input buffer.
  if (need_dilated_im2col) {
    DilatedIm2col(params, input_zero_point, input_shape, input_data,
                  filter_shape, output_shape, im2col_data);
    gemm_input_data = im2col_data;
    num_input = im2col_shape.FlatSize();
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    // symmetric quantization assumes zero point of 0.
    Im2col(params, filter_height, filter_width, input_zero_point, input_shape,
           input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    num_input = im2col_shape.FlatSize();
  } else {
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    num_input = input_shape.FlatSize();
  }

  // Flatten 4D matrices into 2D matrices for matrix multiplication.

  // Flatten so that each filter has its own row.
  const int filter_rows = filter_shape.Dims(0);
  const int filter_cols = FlatSizeSkipDim(filter_shape, 0);

  // In MatrixBatchVectorMultiplyAccumulate, each output value is the
  // dot product of one row of the first matrix with one row of the second
  // matrix. Therefore, the number of cols in each matrix are equivalent.
  //
  // After Im2Col, each input patch becomes a row.
  const int gemm_input_cols = filter_cols;
  const int gemm_input_rows = num_input / gemm_input_cols;

  const int output_cols = output_shape.Dims(3);
  const int output_rows = FlatSizeSkipDim(output_shape, 3);
  TFLITE_DCHECK_EQ(output_cols, filter_rows);
  TFLITE_DCHECK_EQ(output_rows, gemm_input_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_cols);

  // MatrixBatchVectorMultiplyAccumulate assumes that each row of the second
  // input matrix has its own scale factor. This code duplicates the scale
  // factors for each row in the same batch.
  // Iterates backwards so per-batch factors are read before being overwritten.
  const int rows_per_batch = gemm_input_rows / batch_size;
  for (int i = gemm_input_rows - 1; i >= 0; --i) {
    scaling_factors_ptr[i] = scaling_factors_ptr[i / rows_per_batch];
  }

  // The multiply-accumulate below adds into output_data, so clear it first.
  std::fill_n(output_data, output_rows * output_cols, 0.0f);

  // The scratch buffer must have the same size as the output.
  TFLITE_DCHECK_EQ(accum_scratch_shape.FlatSize(), output_shape.FlatSize());
  tensor_utils::MatrixBatchVectorMultiplyAccumulate(
      filter_data, filter_rows, filter_cols, gemm_input_data,
      scaling_factors_ptr, /*n_batch=*/gemm_input_rows, accum_scratch,
      output_data, context);
  AddBiasAndEvalActivationFunction(output_activation_min, output_activation_max,
                                   bias_shape, bias_data, output_shape,
                                   output_data);
}
// Hybrid convolution with per-channel filter scales and per-batch input
// offsets. The int8 GEMM accumulates into int32 `scratch`; the final loop
// dequantizes each output column with Eigen: subtract zero_point * row_sums,
// scale by per_channel_scale * input_scale, add bias, and clamp to the float
// activation range. row_sums (per-filter-row sums of the weights) are cached
// across calls and recomputed only when *compute_row_sums is set.
inline void HybridConvPerChannel(
    const ConvParams& params, float* scaling_factors_ptr,
    const RuntimeShape& input_shape, const int8_t* input_data,
    const RuntimeShape& filter_shape, const int8_t* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const RuntimeShape& im2col_shape, int8_t* im2col_data,
    const float* per_channel_scale, int32_t* input_offset,
    const RuntimeShape& scratch_shape, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("ConvHybridPerChannel");
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);

  const int8* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;

  const int batch_size = input_shape.Dims(0);

  // Select the GEMM input: im2col expansion (padding filled using the
  // per-batch input_offset) when required, else the raw input.
  if (need_dilated_im2col) {
    TFLITE_DCHECK(im2col_data);
    optimized_ops::DilatedIm2col(params, input_shape, input_data, filter_shape,
                                 output_shape, im2col_data, input_offset,
                                 batch_size);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    Im2col(params, filter_height, filter_width, input_offset, batch_size,
           input_shape, input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }

  // 2D GEMM view: one row per filter, one column per output pixel.
  const int filter_rows = filter_shape.Dims(0);
  const int filter_cols = FlatSizeSkipDim(filter_shape, 0);
  const int gemm_input_rows = gemm_input_shape->Dims(3);
  const int gemm_input_cols = FlatSizeSkipDim(*gemm_input_shape, 3);
  const int output_rows = output_shape.Dims(3);
  const int output_cols =
      output_shape.Dims(0) * output_shape.Dims(1) * output_shape.Dims(2);

  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(output_cols, gemm_input_cols);
  TFLITE_DCHECK_EQ(filter_cols, gemm_input_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);
  TFLITE_DCHECK_EQ(scratch_shape.FlatSize(), output_shape.FlatSize());

  // Refresh the cached per-row weight sums when requested (or when no cache
  // flag was provided at all).
  if (!compute_row_sums || *compute_row_sums) {
    tensor_utils::ReductionSumVector(filter_data, row_sums, filter_rows,
                                     filter_cols);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }

  cpu_backend_gemm::MatrixParams<int8> lhs_params;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;

  cpu_backend_gemm::MatrixParams<int8> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = gemm_input_rows;
  rhs_params.cols = gemm_input_cols;

  cpu_backend_gemm::MatrixParams<int32> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = output_rows;
  dst_params.cols = output_cols;

  // TODO(b/149003801): Use hybrid gemm once supported in Ruy.
  cpu_backend_gemm::GemmParams<int32_t, int32_t> gemm_params;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data,
                         dst_params, scratch, gemm_params, cpu_backend_context);

  // Dequantize the int32 accumulators column by column (one column per
  // output pixel), using the batch that column belongs to for the input
  // scale and zero point.
  MatrixMap<float> out_mat(output_data, filter_rows, output_cols);
  MatrixMap<int32_t> in_mat(scratch, filter_rows, output_cols);
  VectorMap<const float> bias_data_vec(bias_data, filter_rows, 1);
  VectorMap<int32_t> row_sums_vec(row_sums, filter_rows, 1);
  VectorMap<const float> per_channel_scale_vec(per_channel_scale, filter_rows,
                                               1);
  const int cols_per_batch = output_cols / batch_size;
  for (int c = 0; c < output_cols; c++) {
    const int b = c / cols_per_batch;
    const float input_scale = scaling_factors_ptr[b];
    const int32_t zero_point = input_offset[b];
    out_mat.col(c) =
        (((in_mat.col(c) - (row_sums_vec * zero_point))
              .cast<float>()
              .cwiseProduct((per_channel_scale_vec * input_scale))) +
         bias_data_vec)
            .cwiseMin(params.float_activation_max)
            .cwiseMax(params.float_activation_min);
  }
}
// Quantized (uint8) 2D convolution. Lowers to a uint8 GEMM through
// cpu_backend_gemm after an optional im2col expansion; bias addition,
// fixed-point requantization (output_multiplier/output_shift) and activation
// clamping are all fused into the GEMM. params.input_offset and
// params.weights_offset hold the *negated* zero points, hence the sign flips
// when filling zero_point fields below.
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const uint8* input_data, const RuntimeShape& filter_shape,
                 const uint8* filter_data, const RuntimeShape& bias_shape,
                 const int32* bias_data, const RuntimeShape& output_shape,
                 uint8* output_data, const RuntimeShape& im2col_shape,
                 uint8* im2col_data, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("Conv/8bit");

  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int32 input_offset = params.input_offset;
  const int32 filter_offset = params.weights_offset;
  const int32 output_offset = params.output_offset;
  const int32 output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);

  const uint8* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const bool need_dilated_im2col =
      dilation_width_factor != 1 || dilation_height_factor != 1;
  const bool need_im2col = stride_width != 1 || stride_height != 1 ||
                           filter_width != 1 || filter_height != 1;

  // Choose the GEMM input; im2col padding must be filled with the input's
  // zero point (recovered by negating input_offset, valid range [0, 255]).
  if (need_dilated_im2col) {
    TFLITE_DCHECK(im2col_data);
    const int input_zero_point = -input_offset;
    TFLITE_DCHECK_GE(input_zero_point, 0);
    TFLITE_DCHECK_LE(input_zero_point, 255);
    DilatedIm2col(params, input_zero_point, input_shape, input_data,
                  filter_shape, output_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    const int input_zero_point = -input_offset;
    TFLITE_DCHECK_GE(input_zero_point, 0);
    TFLITE_DCHECK_LE(input_zero_point, 255);
    Im2col(params, filter_height, filter_width, input_zero_point, input_shape,
           input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }

  const int gemm_input_rows = gemm_input_shape->Dims(3);
  // Using FlatSizeSkipDim causes segfault in some contexts (see b/79927784).
  // The root cause has not yet been identified though. Same applies below for
  // the other calls commented out. This is a partial rollback of cl/196819423.
  // const int gemm_input_cols = FlatSizeSkipDim(*gemm_input_shape, 3);
  const int gemm_input_cols = gemm_input_shape->Dims(0) *
                              gemm_input_shape->Dims(1) *
                              gemm_input_shape->Dims(2);
  const int filter_rows = filter_shape.Dims(0);
  // See b/79927784.
  // const int filter_cols = FlatSizeSkipDim(filter_shape, 0);
  const int filter_cols =
      filter_shape.Dims(1) * filter_shape.Dims(2) * filter_shape.Dims(3);
  const int output_rows = output_shape.Dims(3);
  // See b/79927784.
  // const int output_cols = FlatSizeSkipDim(output_shape, 3);
  const int output_cols =
      output_shape.Dims(0) * output_shape.Dims(1) * output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_rows, filter_rows);
  TFLITE_DCHECK_EQ(output_cols, gemm_input_cols);
  TFLITE_DCHECK_EQ(filter_cols, gemm_input_rows);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_rows);

  cpu_backend_gemm::MatrixParams<uint8> lhs_params;
  lhs_params.rows = filter_rows;
  lhs_params.cols = filter_cols;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  // filter_offset is the negated filter zero point; negate back for gemm.
  lhs_params.zero_point = -filter_offset;
  cpu_backend_gemm::MatrixParams<uint8> rhs_params;
  rhs_params.rows = gemm_input_rows;
  rhs_params.cols = gemm_input_cols;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.zero_point = -input_offset;
  cpu_backend_gemm::MatrixParams<uint8> dst_params;
  dst_params.rows = output_rows;
  dst_params.cols = output_cols;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.zero_point = output_offset;
  cpu_backend_gemm::GemmParams<int32, uint8> gemm_params;
  // Bias, requantization and activation clamping are fused into the GEMM.
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  gemm_params.multiplier_fixedpoint = output_multiplier;
  gemm_params.multiplier_exponent = output_shift;
  cpu_backend_gemm::Gemm(lhs_params, filter_data, rhs_params, gemm_input_data,
                         dst_params, output_data, gemm_params,
                         cpu_backend_context);
}
// Rearranges data from depth into spatial blocks (the inverse of
// SpaceToDepth): each input depth slice of size block_size^2 * output_depth
// is spread over a block_size x block_size patch of the output.
// Implemented as sequential memcpy's: for each input row we emit block_size
// output rows, and within one output row the longest run that is contiguous
// in both tensors is block_size * output_depth elements.
template <typename T>
inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  ruy::profiler::ScopeLabel label("DepthToSpace");

  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);

  const int input_depth = input_shape.Dims(3);
  const int input_width = input_shape.Dims(2);
  const int input_height = input_shape.Dims(1);
  const int output_depth = output_shape.Dims(3);
  const int num_batches = output_shape.Dims(0);

  // Longest run of values that is contiguous in both input and output.
  const int contiguous_len = op_params.block_size * output_depth;

  // The output is written strictly sequentially; only the read side jumps.
  T* write_ptr = output_data;
  for (int b = 0; b < num_batches; ++b) {
    for (int in_row = 0; in_row < input_height; ++in_row) {
      const T* row_base = input_data + Offset(input_shape, b, in_row, 0, 0);
      for (int block_row = 0; block_row < op_params.block_size; ++block_row) {
        // Walk across the input row, copying one contiguous run per input
        // column; each pass over the row produces one output row.
        const T* read_ptr = row_base;
        for (int in_col = 0; in_col < input_width; ++in_col) {
          memcpy(write_ptr, read_ptr, contiguous_len * sizeof(T));
          write_ptr += contiguous_len;
          read_ptr += input_depth;
        }
        row_base += contiguous_len;
      }
    }
  }
}
// SpaceToDepth packs each block_size x block_size spatial patch of the input
// into the depth dimension of one output pixel (the inverse layout transform
// of DepthToSpace above). The input is read strictly sequentially; every
// `stride` (= block_size * input_depth) consecutive input values are copied
// with one memcpy into a strided position in the output. Shapes with fewer
// than 4 dimensions are promoted to 4-D first.
template <typename T>
inline void SpaceToDepth(const tflite::SpaceToDepthParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  ruy::profiler::ScopeLabel label("SpaceToDepth");
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int output_depth = output_shape.Dims(3);
  const int output_width = output_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int input_depth = input_shape.Dims(3);
  const int batch_size = input_shape.Dims(0);
  // Number of contiguous values that we can copy in one iteration.
  const int stride = op_params.block_size * input_depth;
  for (int batch = 0; batch < batch_size; ++batch) {
    for (int out_h = 0; out_h < output_height; ++out_h) {
      T* output_ptr = output_data + Offset(output_shape, batch, out_h, 0, 0);
      // block_size input rows fold into this single output row.
      for (int offset_h = 0; offset_h < op_params.block_size; ++offset_h) {
        T* dst = output_ptr;
        for (int out_w = 0; out_w < output_width; ++out_w) {
          memcpy(dst, input_data, stride * sizeof(T));
          input_data += stride;
          dst += output_depth;  // Next output pixel in this row.
        }
        output_ptr += stride;  // Depth offset for the next input sub-row.
      }
    }
  }
}
// Standalone (non-fused) float Relu: output = max(input, 0), computed with a
// single vectorized Eigen expression over the flattened tensors.
inline void Relu(const RuntimeShape& input_shape, const float* input_data,
                 const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Relu (not fused)");
  auto output_map = MapAsVector(output_data, output_shape);
  const auto input_map = MapAsVector(input_data, input_shape);
  output_map = input_map.cwiseMax(0.0f);
}
// Normalizes each innermost-dimension slice of the input to unit L2 norm.
// `epsilon` is a lower bound on the norm so that all-zero slices do not
// divide by zero.
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const float* input_data,
                            const RuntimeShape& output_shape,
                            float* output_data, float epsilon = 1e-6) {
  ruy::profiler::ScopeLabel label("L2Normalization");
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int slice = 0; slice < outer_size; ++slice) {
    // First pass: accumulate the squared L2 norm of this slice.
    float sum_of_squares = 0;
    for (int c = 0; c < depth; ++c) {
      sum_of_squares += input_data[c] * input_data[c];
    }
    // Guard against division by zero for all-zero slices.
    float norm = std::sqrt(sum_of_squares);
    norm = std::max(norm, epsilon);
    // Second pass: scale every element by 1/norm.
    for (int c = 0; c < depth; ++c) {
      output_data[c] = input_data[c] / norm;
    }
    input_data += depth;
    output_data += depth;
  }
}
// Quantized (uint8) L2 normalization over the innermost dimension.
//
// Two passes per depth-slice: the first accumulates the integer squared L2
// norm of the zero-point-corrected inputs; the second rescales each element
// by a fixed-point approximation of 1/sqrt(norm) obtained from
// GetInvSqrtQuantizedMultiplierExp, re-centers the result around a fixed
// output zero point of 128, and clamps it to [0, 255].
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const uint8* input_data,
                            const RuntimeShape& output_shape,
                            uint8* output_data) {
  ruy::profiler::ScopeLabel label("L2Normalization/8bit");
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int32 input_zero_point = op_params.input_zero_point;
  for (int i = 0; i < outer_size; ++i) {
    // Pass 1: integer squared L2 norm of this slice.
    int32 square_l2_norm = 0;
    for (int c = 0; c < depth; c++) {
      // Note that input_data advances by depth in the second pass below.
      int32 diff = input_data[c] - input_zero_point;
      square_l2_norm += diff * diff;
    }
    // TODO(b/29395854): add clamping to TOCO and TF Lite kernel
    // for all zero tensors in the input_data
    int32 inv_l2norm_multiplier;
    int inv_l2norm_shift;
    GetInvSqrtQuantizedMultiplierExp(square_l2_norm, kReverseShift,
                                     &inv_l2norm_multiplier, &inv_l2norm_shift);
    // Pass 2: rescale each element; 128 is the fixed output zero point.
    for (int c = 0; c < depth; c++) {
      int32 diff = *input_data - input_zero_point;
      int32 rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          128 * diff, inv_l2norm_multiplier, inv_l2norm_shift);
      int32 unclamped_output_val = 128 + rescaled_diff;
      int32 output_val = std::min(255, std::max(0, unclamped_output_val));
      *output_data = static_cast<uint8>(output_val);
      ++input_data;
      ++output_data;
    }
  }
}
// Elementwise float addition with fused activation clamping. The NEON path
// consumes 16 values per iteration (four q-registers), then 4 at a time; the
// trailing scalar loop handles whatever remains (and the whole range when
// NEON is unavailable).
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const float* input1_data, const float* input2_data,
                           float* output_data) {
  int i = 0;
#ifdef USE_NEON
  const auto activation_min = vdupq_n_f32(params.float_activation_min);
  const auto activation_max = vdupq_n_f32(params.float_activation_max);
  // Main loop: 16 floats per iteration.
  for (; i <= size - 16; i += 16) {
    auto a10 = vld1q_f32(input1_data + i);
    auto a11 = vld1q_f32(input1_data + i + 4);
    auto a12 = vld1q_f32(input1_data + i + 8);
    auto a13 = vld1q_f32(input1_data + i + 12);
    auto a20 = vld1q_f32(input2_data + i);
    auto a21 = vld1q_f32(input2_data + i + 4);
    auto a22 = vld1q_f32(input2_data + i + 8);
    auto a23 = vld1q_f32(input2_data + i + 12);
    auto x0 = vaddq_f32(a10, a20);
    auto x1 = vaddq_f32(a11, a21);
    auto x2 = vaddq_f32(a12, a22);
    auto x3 = vaddq_f32(a13, a23);
    // Clamp to the activation range.
    x0 = vmaxq_f32(activation_min, x0);
    x1 = vmaxq_f32(activation_min, x1);
    x2 = vmaxq_f32(activation_min, x2);
    x3 = vmaxq_f32(activation_min, x3);
    x0 = vminq_f32(activation_max, x0);
    x1 = vminq_f32(activation_max, x1);
    x2 = vminq_f32(activation_max, x2);
    x3 = vminq_f32(activation_max, x3);
    vst1q_f32(output_data + i, x0);
    vst1q_f32(output_data + i + 4, x1);
    vst1q_f32(output_data + i + 8, x2);
    vst1q_f32(output_data + i + 12, x3);
  }
  // Secondary loop: 4 floats per iteration.
  for (; i <= size - 4; i += 4) {
    auto a1 = vld1q_f32(input1_data + i);
    auto a2 = vld1q_f32(input2_data + i);
    auto x = vaddq_f32(a1, a2);
    x = vmaxq_f32(activation_min, x);
    x = vminq_f32(activation_max, x);
    vst1q_f32(output_data + i, x);
  }
#endif  // NEON
  // Scalar tail.
  for (; i < size; i++) {
    auto x = input1_data[i] + input2_data[i];
    output_data[i] = ActivationFunctionWithMinMax(
        x, params.float_activation_min, params.float_activation_max);
  }
}
// Elementwise float add with fused activation clamping. All three shapes must
// describe the same number of elements; the work is done by AddElementwise.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const float* input1_data,
                const RuntimeShape& input2_shape, const float* input2_data,
                const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Add");
  const int size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  AddElementwise(size, params, input1_data, input2_data, output_data);
}
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
//
// Quantized pipeline per element:
//   1. add the input offsets to the raw uint8 values,
//   2. shift left by params.left_shift to gain headroom,
//   3. scale each input by its own multiplier/shift pair,
//   4. sum, rescale by the output multiplier/shift, add the output offset,
//   5. clamp to the quantized activation range.
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const uint8* input1_data, const uint8* input2_data,
                           uint8* output_data) {
  ruy::profiler::ScopeLabel label("AddElementwise/8bit");
  int i = 0;
  // Offsets must fit the widened 16-bit arithmetic used below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
#ifdef USE_NEON
  const uint8x8_t output_activation_min_vector =
      vdup_n_u8(params.quantized_activation_min);
  const uint8x8_t output_activation_max_vector =
      vdup_n_u8(params.quantized_activation_max);
  for (; i <= size - 8; i += 8) {
    const uint8x8_t input1_val_original = vld1_u8(input1_data + i);
    const uint8x8_t input2_val_original = vld1_u8(input2_data + i);
    // Widen u8 -> s16 and apply the input offsets.
    const int16x8_t input1_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input1_val_original));
    const int16x8_t input2_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input2_val_original));
    const int16x8_t input1_val =
        vaddq_s16(input1_val_s16, vdupq_n_s16(params.input1_offset));
    const int16x8_t input2_val =
        vaddq_s16(input2_val_s16, vdupq_n_s16(params.input2_offset));
    const int16x4_t input1_val_high = vget_high_s16(input1_val);
    const int16x4_t input1_val_low = vget_low_s16(input1_val);
    const int16x4_t input2_val_high = vget_high_s16(input2_val);
    const int16x4_t input2_val_low = vget_low_s16(input2_val);
    // Widen to s32 for the fixed-point scaling.
    int32x4_t x11 = vmovl_s16(input1_val_low);
    int32x4_t x12 = vmovl_s16(input1_val_high);
    int32x4_t x21 = vmovl_s16(input2_val_low);
    int32x4_t x22 = vmovl_s16(input2_val_high);
    const int32x4_t left_shift_dup = vdupq_n_s32(params.left_shift);
    x11 = vshlq_s32(x11, left_shift_dup);
    x12 = vshlq_s32(x12, left_shift_dup);
    x21 = vshlq_s32(x21, left_shift_dup);
    x22 = vshlq_s32(x22, left_shift_dup);
    // Per-input fixed-point multipliers, then per-input shifts.
    x11 = vqrdmulhq_n_s32(x11, params.input1_multiplier);
    x12 = vqrdmulhq_n_s32(x12, params.input1_multiplier);
    x21 = vqrdmulhq_n_s32(x21, params.input2_multiplier);
    x22 = vqrdmulhq_n_s32(x22, params.input2_multiplier);
    const int32x4_t input1_shift_dup = vdupq_n_s32(params.input1_shift);
    const int32x4_t input2_shift_dup = vdupq_n_s32(params.input2_shift);
    x11 = vshlq_s32(x11, input1_shift_dup);
    x12 = vshlq_s32(x12, input1_shift_dup);
    x21 = vshlq_s32(x21, input2_shift_dup);
    x22 = vshlq_s32(x22, input2_shift_dup);
    // Sum and rescale into the output domain.
    int32x4_t s1 = vaddq_s32(x11, x21);
    int32x4_t s2 = vaddq_s32(x12, x22);
    s1 = vqrdmulhq_n_s32(s1, params.output_multiplier);
    s2 = vqrdmulhq_n_s32(s2, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    s1 = RoundingDivideByPOT(s1, -params.output_shift);
    s2 = RoundingDivideByPOT(s2, -params.output_shift);
    // Narrow back to s16, add the output offset, saturate to u8, clamp.
    const int16x4_t s1_narrowed = vmovn_s32(s1);
    const int16x4_t s2_narrowed = vmovn_s32(s2);
    const int16x8_t s = vaddq_s16(vcombine_s16(s1_narrowed, s2_narrowed),
                                  vdupq_n_s16(params.output_offset));
    const uint8x8_t clamped =
        vmax_u8(output_activation_min_vector,
                vmin_u8(output_activation_max_vector, vqmovun_s16(s)));
    vst1_u8(output_data + i, clamped);
  }
#endif  // NEON
  // Scalar tail (and full path without NEON); mirrors the vector pipeline.
  for (; i < size; ++i) {
    const int32 input1_val = params.input1_offset + input1_data[i];
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
    const int32 scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    const int32 scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, params.input2_multiplier, params.input2_shift);
    const int32 raw_sum = scaled_input1_val + scaled_input2_val;
    const int32 raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            raw_sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<uint8>(clamped_output);
  }
}
// Scalar-broadcast add that can be used for inner loop of more general
// broadcast add, so that, for example, scalar-broadcast with batch will still
// be fast.
//
// input1_data is a single scalar operand; its offset/left-shift/multiplier
// processing is hoisted out of the loop and reused against every group of
// input2 values. Otherwise the arithmetic matches AddElementwise above.
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
                               uint8 input1_data, const uint8* input2_data,
                               uint8* output_data) {
  using gemmlowp::RoundingDivideByPOT;
  ruy::profiler::ScopeLabel label("AddScalarBroadcast/8bit");
  // Offsets must fit the widened 16-bit arithmetic used below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  int i = 0;
#ifdef USE_NEON
  const int32x4_t left_shift_dup = vdupq_n_s32(params.left_shift);
  const uint8x8_t output_activation_min_vector =
      vdup_n_u8(params.quantized_activation_min);
  const uint8x8_t output_activation_max_vector =
      vdup_n_u8(params.quantized_activation_max);
  // Process broadcast scalar: widen, offset, shift and scale it once.
  const uint8x8_t input1_val_original = vdup_n_u8(input1_data);
  const int16x8_t input1_val_s16 =
      vreinterpretq_s16_u16(vmovl_u8(input1_val_original));
  const int16x8_t input1_val =
      vaddq_s16(input1_val_s16, vdupq_n_s16(params.input1_offset));
  const int16x4_t input1_val_high = vget_high_s16(input1_val);
  const int16x4_t input1_val_low = vget_low_s16(input1_val);
  int32x4_t x11 = vmovl_s16(input1_val_low);
  int32x4_t x12 = vmovl_s16(input1_val_high);
  x11 = vshlq_s32(x11, left_shift_dup);
  x12 = vshlq_s32(x12, left_shift_dup);
  x11 = vqrdmulhq_n_s32(x11, params.input1_multiplier);
  x12 = vqrdmulhq_n_s32(x12, params.input1_multiplier);
  const int32x4_t input1_shift_dup = vdupq_n_s32(params.input1_shift);
  x11 = vshlq_s32(x11, input1_shift_dup);
  x12 = vshlq_s32(x12, input1_shift_dup);
  for (; i <= size - 8; i += 8) {
    const uint8x8_t input2_val_original = vld1_u8(input2_data + i);
    // Widen u8 -> s16 and apply the input2 offset.
    const int16x8_t input2_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input2_val_original));
    const int16x8_t input2_val =
        vaddq_s16(input2_val_s16, vdupq_n_s16(params.input2_offset));
    const int16x4_t input2_val_high = vget_high_s16(input2_val);
    const int16x4_t input2_val_low = vget_low_s16(input2_val);
    int32x4_t x21 = vmovl_s16(input2_val_low);
    int32x4_t x22 = vmovl_s16(input2_val_high);
    x21 = vshlq_s32(x21, left_shift_dup);
    x22 = vshlq_s32(x22, left_shift_dup);
    x21 = vqrdmulhq_n_s32(x21, params.input2_multiplier);
    x22 = vqrdmulhq_n_s32(x22, params.input2_multiplier);
    const int32x4_t input2_shift_dup = vdupq_n_s32(params.input2_shift);
    x21 = vshlq_s32(x21, input2_shift_dup);
    x22 = vshlq_s32(x22, input2_shift_dup);
    // Sum with the precomputed scalar lanes and rescale to the output domain.
    int32x4_t s1 = vaddq_s32(x11, x21);
    int32x4_t s2 = vaddq_s32(x12, x22);
    s1 = vqrdmulhq_n_s32(s1, params.output_multiplier);
    s2 = vqrdmulhq_n_s32(s2, params.output_multiplier);
    s1 = RoundingDivideByPOT(s1, -params.output_shift);
    s2 = RoundingDivideByPOT(s2, -params.output_shift);
    // Narrow, add the output offset, saturate to u8 and clamp.
    const int16x4_t s1_narrowed = vmovn_s32(s1);
    const int16x4_t s2_narrowed = vmovn_s32(s2);
    const int16x8_t s = vaddq_s16(vcombine_s16(s1_narrowed, s2_narrowed),
                                  vdupq_n_s16(params.output_offset));
    const uint8x8_t clamped =
        vmax_u8(output_activation_min_vector,
                vmin_u8(output_activation_max_vector, vqmovun_s16(s)));
    vst1_u8(output_data + i, clamped);
  }
#endif  // NEON
  if (i < size) {
    // Process broadcast scalar.
    const int32 input1_val = params.input1_offset + input1_data;
    const int32 shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32 scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    for (; i < size; ++i) {
      const int32 input2_val = params.input2_offset + input2_data[i];
      const int32 shifted_input2_val = input2_val * (1 << params.left_shift);
      const int32 scaled_input2_val =
          MultiplyByQuantizedMultiplierSmallerThanOneExp(
              shifted_input2_val, params.input2_multiplier,
              params.input2_shift);
      const int32 raw_sum = scaled_input1_val + scaled_input2_val;
      const int32 raw_output =
          MultiplyByQuantizedMultiplierSmallerThanOneExp(
              raw_sum, params.output_multiplier, params.output_shift) +
          params.output_offset;
      const int32 clamped_output =
          std::min(params.quantized_activation_max,
                   std::max(params.quantized_activation_min, raw_output));
      output_data[i] = static_cast<uint8>(clamped_output);
    }
  }
}
// Scalar-broadcast float add: adds a single broadcast value to every element
// of input2, clamping the result to the activation range. Used as the inner
// loop of more general broadcast adds so scalar-with-batch stays fast.
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
                               float broadcast_value, const float* input2_data,
                               float* output_data) {
  int idx = 0;
#ifdef USE_NEON
  const float32x4_t scalar_dup = vdupq_n_f32(broadcast_value);
  const float32x4_t act_min = vdupq_n_f32(params.float_activation_min);
  const float32x4_t act_max = vdupq_n_f32(params.float_activation_max);
  // Vector loop: 4 floats per iteration.
  for (; idx <= size - 4; idx += 4) {
    const float32x4_t rhs = vld1q_f32(input2_data + idx);
    float32x4_t sum = vaddq_f32(rhs, scalar_dup);
    sum = vminq_f32(act_max, sum);
    sum = vmaxq_f32(act_min, sum);
    vst1q_f32(output_data + idx, sum);
  }
#endif  // NEON
  // Scalar tail.
  for (; idx < size; ++idx) {
    output_data[idx] = ActivationFunctionWithMinMax(
        broadcast_value + input2_data[idx], params.float_activation_min,
        params.float_activation_max);
  }
}
// Quantized (uint8) elementwise add. Validates that the quantization
// parameters are within the ranges AddElementwise assumes, then delegates.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8* input1_data,
                const RuntimeShape& input2_shape, const uint8* input2_data,
                const RuntimeShape& output_shape, uint8* output_data) {
  ruy::profiler::ScopeLabel label("Add/8bit");
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  AddElementwise(size, params, input1_data, input2_data, output_data);
}
// int16 add in Q0.15 fixed point. Exactly one of the two inputs may carry a
// nonzero (non-positive) shift: that input is right-shifted onto the other's
// scale, the two are combined with a saturating fixed-point add, and the
// result is clamped to the quantized activation range.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16* input1_data,
                const RuntimeShape& input2_shape, const int16* input2_data,
                const RuntimeShape& output_shape, int16* output_data) {
  ruy::profiler::ScopeLabel label("Add/Int16");
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int input1_shift = params.input1_shift;
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  const int16 output_activation_min = params.quantized_activation_min;
  const int16 output_activation_max = params.quantized_activation_max;
  // At least one input must already be at the output scale (shift == 0), and
  // shifts may only scale down (<= 0).
  TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
  TFLITE_DCHECK_LE(input1_shift, 0);
  TFLITE_DCHECK_LE(params.input2_shift, 0);
  const int16* not_shift_input = input1_shift == 0 ? input1_data : input2_data;
  const int16* shift_input = input1_shift == 0 ? input2_data : input1_data;
  const int input_right_shift =
      input1_shift == 0 ? -params.input2_shift : -input1_shift;
  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
    // Rounding right shift brings the other operand onto the same scale.
    F0 scaled_input = F0::FromRaw(
        gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
    F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
    const int16 raw_output = result.raw();
    const int16 clamped_output = std::min(
        output_activation_max, std::max(output_activation_min, raw_output));
    output_data[i] = clamped_output;
  }
}
// Integer (int32/int64) add with activation clamping. Uses vectorized Eigen
// expressions for the same-shape and scalar-broadcast cases, and falls back
// to the slow reference broadcast kernel for general broadcasting.
template <typename T>
inline typename std::enable_if<is_int32_or_int64<T>::value, void>::type Add(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Add/int32or64");
  T activation_min, activation_max;
  GetActivationParams(params, &activation_min, &activation_max);
  auto lhs_map = MapAsVector(input1_data, input1_shape);
  auto rhs_map = MapAsVector(input2_data, input2_shape);
  auto out_map = MapAsVector(output_data, output_shape);
  if (input1_shape == input2_shape) {
    // Identical shapes: plain elementwise add.
    out_map.array() = (lhs_map.array() + rhs_map.array())
                          .cwiseMax(activation_min)
                          .cwiseMin(activation_max);
  } else if (input2_shape.FlatSize() == 1) {
    // RHS is a scalar.
    out_map.array() = (lhs_map.array() + input2_data[0])
                          .cwiseMax(activation_min)
                          .cwiseMin(activation_max);
  } else if (input1_shape.FlatSize() == 1) {
    // LHS is a scalar.
    out_map.array() = (input1_data[0] + rhs_map.array())
                          .cwiseMax(activation_min)
                          .cwiseMin(activation_max);
  } else {
    // General broadcasting: defer to the reference kernel.
    reference_ops::BroadcastAdd4DSlow<T>(params, input1_shape, input1_data,
                                         input2_shape, input2_data,
                                         output_shape, output_data);
  }
}
template <typename T>
inline void BroadcastAddDispatch(
const ArithmeticParams& params, const RuntimeShape& input1_shape,
const T* input1_data, const RuntimeShape& input2_shape,
const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
if (params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast) {
return BroadcastAdd4DSlow(params, input1_shape, input1_data, input2_shape,
input2_data, output_shape, output_data);
}
BinaryBroadcastFiveFold(
params, input1_shape, input1_data, input2_shape, input2_data,
output_shape, output_data,
static_cast<void (*)(int, const ArithmeticParams&, const T*, const T*,
T*)>(AddElementwise),
static_cast<void (*)(int, const ArithmeticParams&, T, const T*, T*)>(
AddScalarBroadcast));
}
inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
const RuntimeShape& unswitched_input1_shape,
const uint8* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const uint8* unswitched_input2_data,
const RuntimeShape& output_shape,
uint8* output_data) {
BroadcastAddDispatch(unswitched_params, unswitched_input1_shape,
unswitched_input1_data, unswitched_input2_shape,
unswitched_input2_data, output_shape, output_data);
}
inline void BroadcastAddFivefold(const ArithmeticParams& params,
const RuntimeShape& unswitched_input1_shape,
const float* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const float* unswitched_input2_data,
const RuntimeShape& output_shape,
float* output_data) {
BroadcastAddDispatch(params, unswitched_input1_shape, unswitched_input1_data,
unswitched_input2_shape, unswitched_input2_data,
output_shape, output_data);
}
// Elementwise float multiply with fused activation clamping. The NEON path
// consumes 16 values per iteration (four q-registers), then 4 at a time; the
// trailing scalar loop handles whatever remains.
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const float* input1_data, const float* input2_data,
                           float* output_data) {
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  int i = 0;
#ifdef USE_NEON
  const auto activation_min = vdupq_n_f32(output_activation_min);
  const auto activation_max = vdupq_n_f32(output_activation_max);
  // Main loop: 16 floats per iteration.
  for (; i <= size - 16; i += 16) {
    auto a10 = vld1q_f32(input1_data + i);
    auto a11 = vld1q_f32(input1_data + i + 4);
    auto a12 = vld1q_f32(input1_data + i + 8);
    auto a13 = vld1q_f32(input1_data + i + 12);
    auto a20 = vld1q_f32(input2_data + i);
    auto a21 = vld1q_f32(input2_data + i + 4);
    auto a22 = vld1q_f32(input2_data + i + 8);
    auto a23 = vld1q_f32(input2_data + i + 12);
    auto x0 = vmulq_f32(a10, a20);
    auto x1 = vmulq_f32(a11, a21);
    auto x2 = vmulq_f32(a12, a22);
    auto x3 = vmulq_f32(a13, a23);
    // Clamp to the activation range.
    x0 = vmaxq_f32(activation_min, x0);
    x1 = vmaxq_f32(activation_min, x1);
    x2 = vmaxq_f32(activation_min, x2);
    x3 = vmaxq_f32(activation_min, x3);
    x0 = vminq_f32(activation_max, x0);
    x1 = vminq_f32(activation_max, x1);
    x2 = vminq_f32(activation_max, x2);
    x3 = vminq_f32(activation_max, x3);
    vst1q_f32(output_data + i, x0);
    vst1q_f32(output_data + i + 4, x1);
    vst1q_f32(output_data + i + 8, x2);
    vst1q_f32(output_data + i + 12, x3);
  }
  // Secondary loop: 4 floats per iteration.
  for (; i <= size - 4; i += 4) {
    auto a1 = vld1q_f32(input1_data + i);
    auto a2 = vld1q_f32(input2_data + i);
    auto x = vmulq_f32(a1, a2);
    x = vmaxq_f32(activation_min, x);
    x = vminq_f32(activation_max, x);
    vst1q_f32(output_data + i, x);
  }
#endif  // NEON
  // Scalar tail.
  for (; i < size; i++) {
    auto x = input1_data[i] * input2_data[i];
    output_data[i] = ActivationFunctionWithMinMax(x, output_activation_min,
                                                  output_activation_max);
  }
}
// Elementwise float multiply with fused activation clamping. All three
// shapes must describe the same number of elements.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const float* input1_data,
                const RuntimeShape& input2_shape, const float* input2_data,
                const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Mul");
  const int size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  MulElementwise(size, params, input1_data, input2_data, output_data);
}
inline void Mul(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int32* input1_data,
const RuntimeShape& input2_shape, const int32* input2_data,
const RuntimeShape& output_shape, int32* output_data) {
ruy::profiler::ScopeLabel label("Mul/int32/activation");
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
const int32 output_activation_min = params.quantized_activation_min;
const int32 output_activation_max = params.quantized_activation_max;
for (int i = 0; i < flat_size; ++i) {
output_data[i] = ActivationFunctionWithMinMax(
input1_data[i] * input2_data[i], output_activation_min,
output_activation_max);
}
}
// int32 multiply without activation clamping. Vectorized Eigen paths handle
// the same-shape and scalar-broadcast cases; general broadcasting falls back
// to the slow reference kernel.
inline void MulNoActivation(const ArithmeticParams& params,
                            const RuntimeShape& input1_shape,
                            const int32* input1_data,
                            const RuntimeShape& input2_shape,
                            const int32* input2_data,
                            const RuntimeShape& output_shape,
                            int32* output_data) {
  ruy::profiler::ScopeLabel label("Mul/int32");
  auto lhs_map = MapAsVector(input1_data, input1_shape);
  auto rhs_map = MapAsVector(input2_data, input2_shape);
  auto out_map = MapAsVector(output_data, output_shape);
  if (input1_shape == input2_shape) {
    // Identical shapes: plain elementwise product.
    out_map.array() = lhs_map.array() * rhs_map.array();
  } else if (input2_shape.FlatSize() == 1) {
    // RHS is a scalar.
    out_map.array() = lhs_map.array() * input2_data[0];
  } else if (input1_shape.FlatSize() == 1) {
    // LHS is a scalar.
    out_map.array() = input1_data[0] * rhs_map.array();
  } else {
    // General broadcasting: defer to the reference kernel.
    reference_ops::BroadcastMul4DSlow(params, input1_shape, input1_data,
                                      input2_shape, input2_data, output_shape,
                                      output_data);
  }
}
inline void Mul(const ArithmeticParams& params,
const RuntimeShape& input1_shape, const int16* input1_data,
const RuntimeShape& input2_shape, const int16* input2_data,
const RuntimeShape& output_shape, int16* output_data) {
ruy::profiler::ScopeLabel label("Mul/Int16/NoActivation");
// This is a copy of the reference implementation. We do not currently have a
// properly optimized version.
const int flat_size =
MatchingElementsSize(input1_shape, input2_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
// F0 uses 0 integer bits, range [-1, 1].
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
F0 unclamped_result =
F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]);
output_data[i] = unclamped_result.raw();
}
}
// int16 x int16 -> uint8 multiply. Both inputs are interpreted as Q0.15
// fixed-point values; the product is rescaled with a rounding right shift of
// 8, clamped to the activation range expressed relative to output_offset,
// and then re-centered by output_offset.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16* input1_data,
                const RuntimeShape& input2_shape, const int16* input2_data,
                const RuntimeShape& output_shape, uint8* output_data) {
  ruy::profiler::ScopeLabel label("Mul/Int16Uint8");
  // This is a copy of the reference implementation. We do not currently have a
  // properly optimized version.
  const int32 output_activation_min = params.quantized_activation_min;
  const int32 output_activation_max = params.quantized_activation_max;
  const int32 output_offset = params.output_offset;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    F0 unclamped_result =
        F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]);
    // Rounding shift into the uint8 output scale.
    int16 rescaled_result =
        gemmlowp::RoundingDivideByPOT(unclamped_result.raw(), 8);
    // Clamp in offset-relative space, then add the offset back.
    int16 clamped_result =
        std::min<int16>(output_activation_max - output_offset, rescaled_result);
    clamped_result =
        std::max<int16>(output_activation_min - output_offset, clamped_result);
    output_data[i] = output_offset + clamped_result;
  }
}
// Element-wise mul that can often be used for inner loop of broadcast Mul as
// well as the non-broadcast Mul.
//
// Quantized pipeline per element: add the input offsets, form the 32-bit
// product, rescale by the output multiplier/shift, add the output offset,
// and clamp to the quantized activation range.
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const uint8* input1_data, const uint8* input2_data,
                           uint8* output_data) {
  int i = 0;
  // Offsets must fit the widened 16-bit arithmetic used below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_GT(params.output_offset, -256);
  TFLITE_DCHECK_LT(params.output_offset, 256);
#ifdef USE_NEON
  const auto input1_offset_vector = vdupq_n_s16(params.input1_offset)
;
  const auto input2_offset_vector = vdupq_n_s16(params.input2_offset);
  const auto output_offset_vector = vdupq_n_s16(params.output_offset);
  const auto output_activation_min_vector =
      vdup_n_u8(params.quantized_activation_min);
  const auto output_activation_max_vector =
      vdup_n_u8(params.quantized_activation_max);
  // Split output_shift into a left (positive) and right (negative) part.
  const int left_shift = std::max(0, params.output_shift);
  const int right_shift = std::max(0, -params.output_shift);
  const int32x4_t left_shift_vec = vdupq_n_s32(left_shift);
  for (; i <= size - 8; i += 8) {
    // We load / store 8 at a time, multiplying as two sets of 4 int32s.
    const auto input1_val_original = vld1_u8(input1_data + i);
    const auto input2_val_original = vld1_u8(input2_data + i);
    // Widen u8 -> s16 and apply the input offsets.
    const auto input1_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input1_val_original));
    const auto input2_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input2_val_original));
    const auto input1_val = vaddq_s16(input1_val_s16, input1_offset_vector);
    const auto input2_val = vaddq_s16(input2_val_s16, input2_offset_vector);
    const auto input1_val_low = vget_low_s16(input1_val);
    const auto input1_val_high = vget_high_s16(input1_val);
    const auto input2_val_low = vget_low_s16(input2_val);
    const auto input2_val_high = vget_high_s16(input2_val);
    // Widening multiply: s16 x s16 -> s32.
    auto p1 = vmull_s16(input2_val_low, input1_val_low);
    auto p2 = vmull_s16(input2_val_high, input1_val_high);
    p1 = vshlq_s32(p1, left_shift_vec);
    p2 = vshlq_s32(p2, left_shift_vec);
    p1 = vqrdmulhq_n_s32(p1, params.output_multiplier);
    p2 = vqrdmulhq_n_s32(p2, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    p1 = RoundingDivideByPOT(p1, right_shift);
    p2 = RoundingDivideByPOT(p2, right_shift);
    // Saturating narrow, add the output offset, saturate to u8 and clamp.
    const auto p1_narrowed = vqmovn_s32(p1);
    const auto p2_narrowed = vqmovn_s32(p2);
    const auto p =
        vaddq_s16(vcombine_s16(p1_narrowed, p2_narrowed), output_offset_vector);
    const auto clamped =
        vmax_u8(output_activation_min_vector,
                vmin_u8(output_activation_max_vector, vqmovun_s16(p)));
    vst1_u8(output_data + i, clamped);
  }
#endif  // NEON
  // Scalar tail (and full path without NEON); mirrors the vector pipeline.
  for (; i < size; ++i) {
    const int32 input1_val = params.input1_offset + input1_data[i];
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<uint8>(clamped_output);
  }
}
// Broadcast mul that can often be used for inner loop of broadcast Mul.
//
// broadcast_value is the scalar LHS operand; its offset correction is
// computed once up front and reused against every RHS element.
inline void MulSimpleBroadcast(int size, const ArithmeticParams& params,
                               const uint8 broadcast_value,
                               const uint8* input2_data, uint8* output_data) {
  const int16 input1_val = params.input1_offset + broadcast_value;
  int i = 0;
  // Offsets must fit the widened 16-bit arithmetic used below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_GT(params.output_offset, -256);
  TFLITE_DCHECK_LT(params.output_offset, 256);
#ifdef USE_NEON
  const auto input2_offset_vector = vdupq_n_s16(params.input2_offset);
  const auto output_offset_vector = vdupq_n_s16(params.output_offset);
  const auto output_activation_min_vector =
      vdup_n_u8(params.quantized_activation_min);
  const auto output_activation_max_vector =
      vdup_n_u8(params.quantized_activation_max);
  // Split output_shift into a left (positive) and right (negative) part.
  const int left_shift = std::max(0, params.output_shift);
  const int right_shift = std::max(0, -params.output_shift);
  const int32x4_t left_shift_vec = vdupq_n_s32(left_shift);
  for (; i <= size - 8; i += 8) {
    // We load / store 8 at a time, multiplying as two sets of 4 int32s.
    const auto input2_val_original = vld1_u8(input2_data + i);
    // Widen u8 -> s16 and apply the input2 offset.
    const auto input2_val_s16 =
        vreinterpretq_s16_u16(vmovl_u8(input2_val_original));
    const auto input2_val = vaddq_s16(input2_val_s16, input2_offset_vector);
    const auto input2_val_low = vget_low_s16(input2_val);
    const auto input2_val_high = vget_high_s16(input2_val);
    // Widening multiply by the precomputed scalar: s16 x s16 -> s32.
    auto p1 = vmull_n_s16(input2_val_low, input1_val);
    auto p2 = vmull_n_s16(input2_val_high, input1_val);
    p1 = vshlq_s32(p1, left_shift_vec);
    p2 = vshlq_s32(p2, left_shift_vec);
    p1 = vqrdmulhq_n_s32(p1, params.output_multiplier);
    p2 = vqrdmulhq_n_s32(p2, params.output_multiplier);
    using gemmlowp::RoundingDivideByPOT;
    p1 = RoundingDivideByPOT(p1, right_shift);
    p2 = RoundingDivideByPOT(p2, right_shift);
    // Narrow, add the output offset, saturate to u8 and clamp.
    const auto p1_narrowed = vmovn_s32(p1);
    const auto p2_narrowed = vmovn_s32(p2);
    const auto p =
        vaddq_s16(vcombine_s16(p1_narrowed, p2_narrowed), output_offset_vector);
    const auto clamped =
        vmax_u8(output_activation_min_vector,
                vmin_u8(output_activation_max_vector, vqmovun_s16(p)));
    vst1_u8(output_data + i, clamped);
  }
#endif  // NEON
  // Scalar tail (and full path without NEON); mirrors the vector pipeline.
  for (; i < size; ++i) {
    const int32 input2_val = params.input2_offset + input2_data[i];
    const int32 unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                      params.output_multiplier,
                                      params.output_shift);
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<uint8>(clamped_output);
  }
}
// Broadcast mul inner loop: multiplies one float scalar (LHS) against `size`
// float values (RHS) and clamps to the float activation range.  Quantization
// fields of `params` are irrelevant for this float overload.
inline void MulSimpleBroadcast(int size, const ArithmeticParams& params,
                               const float broadcast_value,
                               const float* input2_data, float* output_data) {
  int idx = 0;
#ifdef USE_NEON
  const float32x4_t act_min_vec = vdupq_n_f32(params.float_activation_min);
  const float32x4_t act_max_vec = vdupq_n_f32(params.float_activation_max);
  const float32x4_t scalar_vec = vdupq_n_f32(broadcast_value);
  // Vector loop: four lanes per iteration.
  for (; idx <= size - 4; idx += 4) {
    const float32x4_t rhs = vld1q_f32(input2_data + idx);
    const float32x4_t product = vmulq_f32(rhs, scalar_vec);
    const float32x4_t clamped =
        vmaxq_f32(act_min_vec, vminq_f32(act_max_vec, product));
    vst1q_f32(output_data + idx, clamped);
  }
#endif  // NEON
  // Scalar tail (whole range when NEON is unavailable).
  for (; idx < size; ++idx) {
    const float product = broadcast_value * input2_data[idx];
    output_data[idx] = ActivationFunctionWithMinMax(
        product, params.float_activation_min, params.float_activation_max);
  }
}
// Elementwise quantized (uint8) Mul for identically-shaped operands.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8* input1_data,
                const RuntimeShape& input2_shape, const uint8* input2_data,
                const RuntimeShape& output_shape, uint8* output_data) {
  ruy::profiler::ScopeLabel label("Mul/8bit");
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // All three shapes must agree elementwise; delegate to the flat kernel.
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  MulElementwise(flat_size, params, input1_data, input2_data, output_data);
}
// Routes a broadcast Mul either to the five-fold fast path or, for shapes the
// five-fold decomposition cannot express, to the generic slow path.
template <typename T>
inline void BroadcastMulDispatch(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  if (params.broadcast_category != BroadcastableOpCategory::kGenericBroadcast) {
    // Resolve the overload sets explicitly so BinaryBroadcastFiveFold
    // receives unambiguous function pointers.
    auto elementwise_fn =
        static_cast<void (*)(int, const ArithmeticParams&, const T*, const T*,
                             T*)>(MulElementwise);
    auto scalar_broadcast_fn =
        static_cast<void (*)(int, const ArithmeticParams&, T, const T*, T*)>(
            MulSimpleBroadcast);
    BinaryBroadcastFiveFold(params, input1_shape, input1_data, input2_shape,
                            input2_data, output_shape, output_data,
                            elementwise_fn, scalar_broadcast_fn);
    return;
  }
  BroadcastMul4DSlow(params, input1_shape, input1_data, input2_shape,
                     input2_data, output_shape, output_data);
}
inline void BroadcastMulFivefold(const ArithmeticParams& unswitched_params,
const RuntimeShape& unswitched_input1_shape,
const uint8* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const uint8* unswitched_input2_data,
const RuntimeShape& output_shape,
uint8* output_data) {
BroadcastMulDispatch(unswitched_params, unswitched_input1_shape,
unswitched_input1_data, unswitched_input2_shape,
unswitched_input2_data, output_shape, output_data);
}
inline void BroadcastMulFivefold(const ArithmeticParams& params,
const RuntimeShape& unswitched_input1_shape,
const float* unswitched_input1_data,
const RuntimeShape& unswitched_input2_shape,
const float* unswitched_input2_data,
const RuntimeShape& output_shape,
float* output_data) {
BroadcastMulDispatch(params, unswitched_input1_shape, unswitched_input1_data,
unswitched_input2_shape, unswitched_input2_data,
output_shape, output_data);
}
// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
// TODO(benoitjacob): BroadcastDiv is intentionally duplicated from
// reference_ops.h. Once an optimized version is implemented and NdArrayDesc<T>
// is no longer referenced in this file, move NdArrayDesc<T> from types.h to
// reference_ops.h.
//
// Generic slow broadcast division: input1 / input2 with numpy-style
// broadcasting over up to N dimensions, clamped to the activation range.
// The division uses T's native `/` semantics (integer truncation for
// integral T, IEEE division for float).
template <typename T, int N = 5>
void BroadcastDivSlow(const ArithmeticParams& params,
                      const RuntimeShape& unextended_input1_shape,
                      const T* input1_data,
                      const RuntimeShape& unextended_input2_shape,
                      const T* input2_data,
                      const RuntimeShape& unextended_output_shape,
                      T* output_data) {
  ruy::profiler::ScopeLabel label("BroadcastDivSlow");
  T output_activation_min;
  T output_activation_max;
  GetActivationParams(params, &output_activation_min, &output_activation_max);
  // All shapes must have at most N dimensions; they are extended to exactly
  // N dimensions before building the broadcast iteration descriptors.
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
  NdArrayDesc<N> desc1;
  NdArrayDesc<N> desc2;
  NdArrayDesc<N> output_desc;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);
  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.
  auto div_func = [&](int indexes[N]) {
    output_data[SubscriptToIndex(output_desc, indexes)] =
        ActivationFunctionWithMinMax(
            input1_data[SubscriptToIndex(desc1, indexes)] /
                input2_data[SubscriptToIndex(desc2, indexes)],
            output_activation_min, output_activation_max);
  };
  // Walk every output coordinate and apply the per-element division.
  NDOpsHelper<N>(output_desc, div_func);
}
// BroadcastDiv is intentionally duplicated from reference_ops.h.
// For more details see the comment above the generic version of
// BroadcastDivSlow.
//
// Quantized (uint8) broadcast division.  Each element is computed by
// multiplying the offset-adjusted numerator with a fixed-point reciprocal of
// the offset-adjusted denominator, then requantizing to the output scale.
template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
                             const RuntimeShape& unextended_input1_shape,
                             const uint8* input1_data,
                             const RuntimeShape& unextended_input2_shape,
                             const uint8* input2_data,
                             const RuntimeShape& unextended_output_shape,
                             uint8* output_data) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N)
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
  NdArrayDesc<N> desc1;
  NdArrayDesc<N> desc2;
  NdArrayDesc<N> output_desc;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);
  // Offsets must fit in 9 bits so the offset-adjusted uint8 inputs fit in
  // the fixed-point arithmetic below.
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_GT(params.output_offset, -256);
  TFLITE_DCHECK_LT(params.output_offset, 256);
  auto div_func = [&](int indexes[N]) {
    const int32 input1_val =
        params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
    const int32 input2_val =
        params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
    // Division by an offset-adjusted zero is undefined.
    TFLITE_DCHECK_NE(input2_val, 0);
    // Compute a Q31 fixed-point reciprocal of |input2_val|, carrying the
    // sign separately; recip_shift records its scaling.
    int recip_shift;
    const int32 input2_inv =
        (input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift)
                         : -GetReciprocal(-input2_val, 31, &recip_shift);
    // Headroom lets the numerator be scaled up without overflow before
    // multiplying with the reciprocal.
    const int headroom = CountLeadingSignBits(input1_val);
    const int32 unscaled_quotient = MultiplyByQuantizedMultiplierGreaterThanOne(
        input1_val, input2_inv, headroom);
    // Undo the reciprocal and headroom scalings while applying the output
    // scale's exponent.
    const int total_shift = params.output_shift - recip_shift - headroom;
    const int32 unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            unscaled_quotient, params.output_multiplier, total_shift);
    const int32 clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[SubscriptToIndex(output_desc, indexes)] =
        static_cast<uint8>(clamped_output);
  };
  NDOpsHelper<N>(output_desc, div_func);
}
// Elementwise subtraction (input1 - input2) for identically-sized operands,
// with the result clamped to the activation range from `params`.
template <typename T>
inline void SubWithActivation(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("SubWithActivation_optimized");
  TFLITE_DCHECK_EQ(input1_shape.FlatSize(), input2_shape.FlatSize());
  T activation_min, activation_max;
  GetActivationParams(params, &activation_min, &activation_max);
  const auto lhs = MapAsVector(input1_data, input1_shape);
  const auto rhs = MapAsVector(input2_data, input2_shape);
  auto result = MapAsVector(output_data, output_shape);
  // Vectorized difference, then clamp into [activation_min, activation_max].
  result.array() = (lhs.array() - rhs.array())
                       .cwiseMin(activation_max)
                       .cwiseMax(activation_min);
}
// Float sub without broadcasting: a thin wrapper over SubWithActivation<float>.
inline void SubNonBroadcast(const ArithmeticParams& params,
                            const RuntimeShape& input1_shape,
                            const float* input1_data,
                            const RuntimeShape& input2_shape,
                            const float* input2_data,
                            const RuntimeShape& output_shape,
                            float* output_data) {
  ruy::profiler::ScopeLabel label("SubNonBroadcast");
  SubWithActivation<float>(params, input1_shape, input1_data, input2_shape,
                           input2_data, output_shape, output_data);
}
// Elementwise subtraction with fast paths for equal shapes and for a scalar
// on either side; any other shape combination takes the slow broadcast path.
// Note: no activation clamping is applied on the fast paths.
template <typename T>
void Sub(const ArithmeticParams& params, const RuntimeShape& input1_shape,
         const T* input1_data, const RuntimeShape& input2_shape,
         const T* input2_data, const RuntimeShape& output_shape,
         T* output_data) {
  ruy::profiler::ScopeLabel label("Sub");
  auto lhs = MapAsVector(input1_data, input1_shape);
  auto rhs = MapAsVector(input2_data, input2_shape);
  auto out = MapAsVector(output_data, output_shape);
  if (input1_shape == input2_shape) {
    // Same shape: plain elementwise difference.
    out.array() = lhs.array() - rhs.array();
    return;
  }
  if (input1_shape.FlatSize() == 1) {
    // Scalar LHS broadcast against the RHS vector.
    out.array() = input1_data[0] - rhs.array();
    return;
  }
  if (input2_shape.FlatSize() == 1) {
    // Scalar RHS broadcast against the LHS vector.
    out.array() = lhs.array() - input2_data[0];
    return;
  }
  // General broadcasting falls back to the slow path.
  BroadcastSubSlow(params, input1_shape, input1_data, input2_shape, input2_data,
                   output_shape, output_data);
}
// Float LSTM cell step.  Concatenates [input, prev_activ] along depth, runs
// one fully-connected layer producing the four gate pre-activations
// (input, new-input/modulation, forget, output, stacked along depth), then
// combines them with prev_state to produce output_state and output_activ.
// concat_temp / activ_temp are caller-provided scratch buffers.
inline void LstmCell(
    const LstmCellParams& params, const RuntimeShape& unextended_input_shape,
    const float* input_data, const RuntimeShape& unextended_prev_activ_shape,
    const float* prev_activ_data, const RuntimeShape& weights_shape,
    const float* weights_data, const RuntimeShape& unextended_bias_shape,
    const float* bias_data, const RuntimeShape& unextended_prev_state_shape,
    const float* prev_state_data,
    const RuntimeShape& unextended_output_state_shape, float* output_state_data,
    const RuntimeShape& unextended_output_activ_shape, float* output_activ_data,
    const RuntimeShape& unextended_concat_temp_shape, float* concat_temp_data,
    const RuntimeShape& unextended_activ_temp_shape, float* activ_temp_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("LstmCell");
  // All activation/state shapes are at most 4D and are extended to exactly 4D.
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape prev_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape);
  const RuntimeShape bias_shape =
      RuntimeShape::ExtendedShape(4, unextended_bias_shape);
  const RuntimeShape prev_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_state_shape);
  const RuntimeShape output_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_state_shape);
  const RuntimeShape output_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_activ_shape);
  const RuntimeShape concat_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape);
  const RuntimeShape activ_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  const int weights_dim_count = weights_shape.DimensionsCount();
  // Consistency checks: batch/height/width must agree across activations and
  // states (results are unused; MatchingDim DCHECKs internally).
  MatchingDim(  // batches
      input_shape, 0, prev_activ_shape, 0, prev_state_shape, 0,
      output_state_shape, 0, output_activ_shape, 0);
  MatchingDim(  // height
      input_shape, 1, prev_activ_shape, 1, prev_state_shape, 1,
      output_state_shape, 1, output_activ_shape, 1);
  MatchingDim(  // width
      input_shape, 2, prev_activ_shape, 2, prev_state_shape, 2,
      output_state_shape, 2, output_activ_shape, 2);
  const int input_depth = input_shape.Dims(3);
  const int prev_activ_depth = prev_activ_shape.Dims(3);
  const int total_input_depth = prev_activ_depth + input_depth;
  TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1),
                   total_input_depth);
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1);
  const int intern_activ_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3);
  TFLITE_DCHECK_EQ(weights_shape.FlatSize(),
                   intern_activ_depth * total_input_depth);
  // The FC output stacks 4 gates, so its depth is 4x the cell's output depth.
  TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0);
  const int output_depth =
      MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape,
                  3, output_activ_shape, 3);
  TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4);
  // Concatenate prev_activ and input data together
  std::vector<float const*> concat_input_arrays_data;
  std::vector<RuntimeShape const*> concat_input_arrays_shapes;
  concat_input_arrays_data.push_back(input_data);
  concat_input_arrays_data.push_back(prev_activ_data);
  concat_input_arrays_shapes.push_back(&input_shape);
  concat_input_arrays_shapes.push_back(&prev_activ_shape);
  tflite::ConcatenationParams concat_params;
  concat_params.axis = 3;
  concat_params.inputs_count = concat_input_arrays_data.size();
  Concatenation(concat_params, &(concat_input_arrays_shapes[0]),
                &(concat_input_arrays_data[0]), concat_temp_shape,
                concat_temp_data);
  // Fully connected
  tflite::FullyConnectedParams fc_params;
  // No activation clamping here; the nonlinearities are applied below.
  fc_params.float_activation_min = std::numeric_limits<float>::lowest();
  fc_params.float_activation_max = std::numeric_limits<float>::max();
  fc_params.lhs_cacheable = false;
  fc_params.rhs_cacheable = false;
  FullyConnected(fc_params, concat_temp_shape, concat_temp_data, weights_shape,
                 weights_data, bias_shape, bias_data, activ_temp_shape,
                 activ_temp_data, cpu_backend_context);
  // Map raw arrays to Eigen arrays so we can use Eigen's optimized array
  // operations.
  ArrayMap<float> activ_temp_map =
      MapAsArrayWithLastDimAsRows(activ_temp_data, activ_temp_shape);
  // The four gates are stored contiguously along the FC output depth, in
  // this order: input gate, new input (modulation), forget gate, output gate.
  auto input_gate_sm = activ_temp_map.block(0 * output_depth, 0, output_depth,
                                            activ_temp_map.cols());
  auto new_input_sm = activ_temp_map.block(1 * output_depth, 0, output_depth,
                                           activ_temp_map.cols());
  auto forget_gate_sm = activ_temp_map.block(2 * output_depth, 0, output_depth,
                                             activ_temp_map.cols());
  auto output_gate_sm = activ_temp_map.block(3 * output_depth, 0, output_depth,
                                             activ_temp_map.cols());
  ArrayMap<const float> prev_state_map =
      MapAsArrayWithLastDimAsRows(prev_state_data, prev_state_shape);
  ArrayMap<float> output_state_map =
      MapAsArrayWithLastDimAsRows(output_state_data, output_state_shape);
  ArrayMap<float> output_activ_map =
      MapAsArrayWithLastDimAsRows(output_activ_data, output_activ_shape);
  // Combined memory state and final output calculation
  ruy::profiler::ScopeLabel label2("MemoryStateAndFinalOutput");
  // state = sigmoid(input_gate) * tanh(new_input)
  //       + sigmoid(forget_gate) * prev_state
  output_state_map =
      input_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
          new_input_sm.tanh() +
      forget_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
          prev_state_map;
  // activ = sigmoid(output_gate) * tanh(state)
  output_activ_map =
      output_gate_sm.unaryExpr(Eigen::internal::scalar_logistic_op<float>()) *
      output_state_map.tanh();
}
// Quantized LSTM cell step: uint8 activations externally, int16 fixed-point
// internally.  StateIntegerBits sets the Q-format of the internal state
// (range [-2^StateIntegerBits, 2^StateIntegerBits]).  Structure mirrors the
// float LstmCell above: concat -> fully-connected (GEMM) -> gate math.
template <int StateIntegerBits>
inline void LstmCell(
    const LstmCellParams& params, const RuntimeShape& unextended_input_shape,
    const uint8* input_data_uint8,
    const RuntimeShape& unextended_prev_activ_shape,
    const uint8* prev_activ_data_uint8, const RuntimeShape& weights_shape,
    const uint8* weights_data_uint8, const RuntimeShape& unextended_bias_shape,
    const int32* bias_data_int32,
    const RuntimeShape& unextended_prev_state_shape,
    const int16* prev_state_data_int16,
    const RuntimeShape& unextended_output_state_shape,
    int16* output_state_data_int16,
    const RuntimeShape& unextended_output_activ_shape,
    uint8* output_activ_data_uint8,
    const RuntimeShape& unextended_concat_temp_shape,
    uint8* concat_temp_data_uint8,
    const RuntimeShape& unextended_activ_temp_shape,
    int16* activ_temp_data_int16, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label(
      "LstmCell/quantized (8bit external, 16bit internal)");
  int32 weights_zero_point = params.weights_zero_point;
  int32 accum_multiplier = params.accum_multiplier;
  int accum_shift = params.accum_shift;
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_bias_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_prev_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_state_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_activ_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_concat_temp_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_activ_temp_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape prev_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_activ_shape);
  const RuntimeShape bias_shape =
      RuntimeShape::ExtendedShape(4, unextended_bias_shape);
  const RuntimeShape prev_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_prev_state_shape);
  const RuntimeShape output_state_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_state_shape);
  const RuntimeShape output_activ_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_activ_shape);
  const RuntimeShape concat_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_concat_temp_shape);
  const RuntimeShape activ_temp_shape =
      RuntimeShape::ExtendedShape(4, unextended_activ_temp_shape);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  // Gather dimensions information, and perform consistency checks.
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int outer_size = MatchingFlatSizeSkipDim(
      input_shape, 3, prev_activ_shape, prev_state_shape, output_state_shape,
      output_activ_shape);
  const int input_depth = input_shape.Dims(3);
  const int prev_activ_depth = prev_activ_shape.Dims(3);
  const int total_input_depth = prev_activ_depth + input_depth;
  TFLITE_DCHECK_EQ(weights_shape.Dims(weights_dim_count - 1),
                   total_input_depth);
  const int intern_activ_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, bias_shape, 3);
  TFLITE_DCHECK_EQ(weights_shape.FlatSize(),
                   intern_activ_depth * total_input_depth);
  TFLITE_DCHECK_EQ(FlatSizeSkipDim(bias_shape, 3), 1);
  // FC output stacks the 4 gates along depth.
  TFLITE_DCHECK_EQ(intern_activ_depth % 4, 0);
  const int output_depth =
      MatchingDim(prev_state_shape, 3, prev_activ_shape, 3, output_state_shape,
                  3, output_activ_shape, 3);
  TFLITE_DCHECK_EQ(output_depth, intern_activ_depth / 4);
  const int fc_batches = FlatSizeSkipDim(activ_temp_shape, 3);
  const int fc_output_depth =
      MatchingDim(weights_shape, weights_dim_count - 2, activ_temp_shape, 3);
  const int fc_accum_depth = total_input_depth;
  TFLITE_DCHECK_EQ(fc_output_depth, 4 * output_depth);
  // Depth-concatenate prev_activ and input data together.
  uint8 const* concat_input_arrays_data[2] = {input_data_uint8,
                                              prev_activ_data_uint8};
  const RuntimeShape* concat_input_arrays_shapes[2] = {&input_shape,
                                                       &prev_activ_shape};
  tflite::ConcatenationParams concat_params;
  concat_params.axis = 3;
  concat_params.inputs_count = 2;
  Concatenation(concat_params, concat_input_arrays_shapes,
                concat_input_arrays_data, concat_temp_shape,
                concat_temp_data_uint8);
  // Implementation of the fully connected node inside the LSTM cell.
  // The operands are 8-bit integers, the accumulators are internally 32bit
  // integers, and the output is 16-bit fixed-point with 3 integer bits so
  // the output range is [-2^3, 2^3] == [-8, 8]. The rationale for that
  // is explained in the function comment above.
  cpu_backend_gemm::MatrixParams<uint8> lhs_params;
  lhs_params.rows = fc_output_depth;
  lhs_params.cols = fc_accum_depth;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.zero_point = weights_zero_point;
  cpu_backend_gemm::MatrixParams<uint8> rhs_params;
  rhs_params.rows = fc_accum_depth;
  rhs_params.cols = fc_batches;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  // Activations are uint8 with a fixed zero point of 128.
  rhs_params.zero_point = 128;
  cpu_backend_gemm::MatrixParams<int16> dst_params;
  dst_params.rows = fc_output_depth;
  dst_params.cols = fc_batches;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.zero_point = 0;
  cpu_backend_gemm::GemmParams<int32, int16> gemm_params;
  gemm_params.bias = bias_data_int32;
  gemm_params.multiplier_fixedpoint = accum_multiplier;
  gemm_params.multiplier_exponent = accum_shift;
  cpu_backend_gemm::Gemm(
      lhs_params, weights_data_uint8, rhs_params, concat_temp_data_uint8,
      dst_params, activ_temp_data_int16, gemm_params, cpu_backend_context);
  // Rest of the LSTM cell: tanh and logistic math functions, and some adds
  // and muls, all done in 16-bit fixed-point.
  // The four gates are stored contiguously along the FC output depth, in
  // this order: input, input-modulation, forget, output.
  const int16* input_gate_input_ptr = activ_temp_data_int16;
  const int16* input_modulation_gate_input_ptr =
      activ_temp_data_int16 + output_depth;
  const int16* forget_gate_input_ptr = activ_temp_data_int16 + 2 * output_depth;
  const int16* output_gate_input_ptr = activ_temp_data_int16 + 3 * output_depth;
  const int16* prev_state_ptr = prev_state_data_int16;
  int16* output_state_data_ptr = output_state_data_int16;
  uint8* output_activ_data_ptr = output_activ_data_uint8;
  for (int b = 0; b < outer_size; ++b) {
    int c = 0;
#ifdef GEMMLOWP_NEON
    // NEON path: 8 int16 lanes per iteration.
    for (; c <= output_depth - 8; c += 8) {
      // Define the fixed-point data types that we will use here. All use
      // int16 as the underlying integer type i.e. all are 16-bit fixed-point.
      // They only differ by the number of integral vs. fractional bits,
      // determining the range of values that they can represent.
      //
      // F0 uses 0 integer bits, range [-1, 1].
      // This is the return type of math functions such as tanh, logistic,
      // whose range is in [-1, 1].
      using F0 = gemmlowp::FixedPoint<int16x8_t, 0>;
      // F3 uses 3 integer bits, range [-8, 8].
      // This is the range of the previous fully-connected node's output,
      // which is our input here.
      using F3 = gemmlowp::FixedPoint<int16x8_t, 3>;
      // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits,
      // 2^StateIntegerBits]. It's used to represent the internal state, whose
      // number of integer bits is currently dictated by the model. See comment
      // on the StateIntegerBits template parameter above.
      using FS = gemmlowp::FixedPoint<int16x8_t, StateIntegerBits>;
      // Implementation of input gate, using fixed-point logistic function.
      F3 input_gate_input = F3::FromRaw(vld1q_s16(input_gate_input_ptr));
      input_gate_input_ptr += 8;
      F0 input_gate_output = gemmlowp::logistic(input_gate_input);
      // Implementation of input modulation gate, using fixed-point tanh
      // function.
      F3 input_modulation_gate_input =
          F3::FromRaw(vld1q_s16(input_modulation_gate_input_ptr));
      input_modulation_gate_input_ptr += 8;
      F0 input_modulation_gate_output =
          gemmlowp::tanh(input_modulation_gate_input);
      // Implementation of forget gate, using fixed-point logistic function.
      F3 forget_gate_input = F3::FromRaw(vld1q_s16(forget_gate_input_ptr));
      forget_gate_input_ptr += 8;
      F0 forget_gate_output = gemmlowp::logistic(forget_gate_input);
      // Implementation of output gate, using fixed-point logistic function.
      F3 output_gate_input = F3::FromRaw(vld1q_s16(output_gate_input_ptr));
      output_gate_input_ptr += 8;
      F0 output_gate_output = gemmlowp::logistic(output_gate_input);
      // Implementation of internal multiplication nodes, still in fixed-point.
      F0 input_times_input_modulation =
          input_gate_output * input_modulation_gate_output;
      FS prev_state = FS::FromRaw(vld1q_s16(prev_state_ptr));
      prev_state_ptr += 8;
      FS prev_state_times_forget_state = forget_gate_output * prev_state;
      // Implementation of internal addition node, saturating.
      FS new_state = gemmlowp::SaturatingAdd(
          gemmlowp::Rescale<StateIntegerBits>(input_times_input_modulation),
          prev_state_times_forget_state);
      // Implementation of last internal Tanh node, still in fixed-point.
      // Since a Tanh fixed-point implementation is specialized for a given
      // number or integer bits, and each specialization can have a substantial
      // code size, and we already used above a Tanh on an input with 3 integer
      // bits, and per the table in the above function comment there is no
      // significant accuracy to be lost by clamping to [-8, +8] for a
      // 3-integer-bits representation, let us just do that. This helps people
      // porting this to targets where code footprint must be minimized.
      F3 new_state_f3 = gemmlowp::Rescale<3>(new_state);
      F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3);
      // Store the new internal state back to memory, as 16-bit integers.
      // Note: here we store the original value with StateIntegerBits, not
      // the rescaled 3-integer-bits value fed to tanh.
      vst1q_s16(output_state_data_ptr, new_state.raw());
      output_state_data_ptr += 8;
      // Down-scale the output activations to 8-bit integers, saturating,
      // and store back to memory.
      int16x8_t rescaled_output_activ =
          gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8);
      int8x8_t int8_output_activ = vqmovn_s16(rescaled_output_activ);
      // Shift from signed [-128, 127] to the uint8 representation with
      // zero point 128.
      uint8x8_t uint8_output_activ =
          vadd_u8(vdup_n_u8(128), vreinterpret_u8_s8(int8_output_activ));
      vst1_u8(output_activ_data_ptr, uint8_output_activ);
      output_activ_data_ptr += 8;
    }
#endif
    // Scalar path: tail of the NEON loop, or the whole depth without NEON.
    for (; c < output_depth; ++c) {
      // Define the fixed-point data types that we will use here. All use
      // int16 as the underlying integer type i.e. all are 16-bit fixed-point.
      // They only differ by the number of integral vs. fractional bits,
      // determining the range of values that they can represent.
      //
      // F0 uses 0 integer bits, range [-1, 1].
      // This is the return type of math functions such as tanh, logistic,
      // whose range is in [-1, 1].
      using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
      // F3 uses 3 integer bits, range [-8, 8].
      // This is the range of the previous fully-connected node's output,
      // which is our input here.
      using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
      // FS uses StateIntegerBits integer bits, range [-2^StateIntegerBits,
      // 2^StateIntegerBits]. It's used to represent the internal state, whose
      // number of integer bits is currently dictated by the model. See comment
      // on the StateIntegerBits template parameter above.
      using FS = gemmlowp::FixedPoint<std::int16_t, StateIntegerBits>;
      // Implementation of input gate, using fixed-point logistic function.
      F3 input_gate_input = F3::FromRaw(*input_gate_input_ptr++);
      F0 input_gate_output = gemmlowp::logistic(input_gate_input);
      // Implementation of input modulation gate, using fixed-point tanh
      // function.
      F3 input_modulation_gate_input =
          F3::FromRaw(*input_modulation_gate_input_ptr++);
      F0 input_modulation_gate_output =
          gemmlowp::tanh(input_modulation_gate_input);
      // Implementation of forget gate, using fixed-point logistic function.
      F3 forget_gate_input = F3::FromRaw(*forget_gate_input_ptr++);
      F0 forget_gate_output = gemmlowp::logistic(forget_gate_input);
      // Implementation of output gate, using fixed-point logistic function.
      F3 output_gate_input = F3::FromRaw(*output_gate_input_ptr++);
      F0 output_gate_output = gemmlowp::logistic(output_gate_input);
      // Implementation of internal multiplication nodes, still in fixed-point.
      F0 input_times_input_modulation =
          input_gate_output * input_modulation_gate_output;
      FS prev_state = FS::FromRaw(*prev_state_ptr++);
      FS prev_state_times_forget_state = forget_gate_output * prev_state;
      // Implementation of internal addition node, saturating.
      FS new_state = gemmlowp::SaturatingAdd(
          gemmlowp::Rescale<StateIntegerBits>(input_times_input_modulation),
          prev_state_times_forget_state);
      // Implementation of last internal Tanh node, still in fixed-point.
      // Since a Tanh fixed-point implementation is specialized for a given
      // number or integer bits, and each specialization can have a substantial
      // code size, and we already used above a Tanh on an input with 3 integer
      // bits, and per the table in the above function comment there is no
      // significant accuracy to be lost by clamping to [-8, +8] for a
      // 3-integer-bits representation, let us just do that. This helps people
      // porting this to targets where code footprint must be minimized.
      F3 new_state_f3 = gemmlowp::Rescale<3>(new_state);
      F0 output_activ_int16 = output_gate_output * gemmlowp::tanh(new_state_f3);
      // Store the new internal state back to memory, as 16-bit integers.
      // Note: here we store the original value with StateIntegerBits, not
      // the rescaled 3-integer-bits value fed to tanh.
      *output_state_data_ptr++ = new_state.raw();
      // Down-scale the output activations to 8-bit integers, saturating,
      // and store back to memory.
      int16 rescaled_output_activ =
          gemmlowp::RoundingDivideByPOT(output_activ_int16.raw(), 8);
      int16 clamped_output_activ =
          std::max<int16>(-128, std::min<int16>(127, rescaled_output_activ));
      *output_activ_data_ptr++ = 128 + clamped_output_activ;
    }
    // Advance the gate pointers past the other three gates' data for this
    // batch element (each already advanced by output_depth in the loop).
    input_gate_input_ptr += 3 * output_depth;
    input_modulation_gate_input_ptr += 3 * output_depth;
    forget_gate_input_ptr += 3 * output_depth;
    output_gate_input_ptr += 3 * output_depth;
  }
}
// Flattened index of coordinate (b, h, w) in a row-major
// [batches, height, width] grid (the per-channel Eigen column index used by
// the pooling kernels below).
inline int NodeOffset(int b, int h, int w, int height, int width) {
  const int row = b * height + h;
  return row * width + w;
}
// Float average pooling.  Instead of gathering the input window for each
// output cell, this iterates over input pixels and scatter-adds each one into
// every output cell whose pooling window covers it, counting contributions so
// the final divide averages over the actual (possibly padded-clipped) window.
inline void AveragePool(const PoolParams& params,
                        const RuntimeShape& input_shape,
                        const float* input_data,
                        const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("AveragePool");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  // TODO(benoitjacob) make this a proper reference impl without Eigen!
  // Columns are per-(b, h, w) channel vectors; see NodeOffset.
  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // TODO(benoitjacob) get rid of the dynamic memory allocation here!
  // Per-output-cell count of input pixels accumulated into it.
  Eigen::VectorXf out_count(out_mat.cols());
  out_count.setZero();
  // Prefill the output to 0.
  out_mat.setZero();
  for (int b = 0; b < batches; ++b) {
    for (int h = 0; h < input_height; ++h) {
      for (int w = 0; w < input_width; ++w) {
        // (h_start, h_end) * (w_start, w_end) is the range that the input
        // vector projects to.
        int hpad = h + params.padding_values.height;
        int wpad = w + params.padding_values.width;
        int h_start = (hpad < params.filter_height)
                          ? 0
                          : (hpad - params.filter_height) / stride_height + 1;
        int h_end = std::min(hpad / stride_height + 1, output_height);
        int w_start = (wpad < params.filter_width)
                          ? 0
                          : (wpad - params.filter_width) / stride_width + 1;
        int w_end = std::min(wpad / stride_width + 1, output_width);
        // compute elementwise sum
        for (int ph = h_start; ph < h_end; ++ph) {
          for (int pw = w_start; pw < w_end; ++pw) {
            int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
            out_mat.col(out_offset) +=
                in_mat.col(NodeOffset(b, h, w, input_height, input_width));
            out_count(out_offset)++;
          }
        }
      }
    }
  }
  // Divide the output by the actual number of elements being averaged over
  TFLITE_DCHECK_GT(out_count.minCoeff(), 0);
  out_mat.array().rowwise() /= out_count.transpose().array();
  // Finally clamp into the float activation range.
  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i],
                                                  params.float_activation_min,
                                                  params.float_activation_max);
  }
}
// Average pooling over a uint8 NHWC tensor (quantized path).
//
// Iterates output positions directly ("gather" mode), accumulating uint8
// inputs into 32-bit accumulators along the depth, then dividing with
// round-half-up and clamping to [quantized_activation_min, max].
inline void AveragePool(const PoolParams& params,
                        const RuntimeShape& input_shape,
                        const uint8* input_data,
                        const RuntimeShape& output_shape, uint8* output_data) {
  ruy::profiler::ScopeLabel label("AveragePool/8bit");
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  // 32-bit sums, one per channel of the current depth tranche.
  uint32 acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch) {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth;
         depth_base += kPoolingAccTrancheSize) {
      const int tranche_depth =
          std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y) {
        for (int out_x = 0; out_x < output_width; ++out_x) {
          // Top-left corner of the filter window in input coordinates
          // (may be negative due to padding).
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Clip the filter window to the valid input region.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          // Number of valid input pixels actually averaged over.
          const int filter_count =
              (filter_x_end - filter_x_start) * (filter_y_end - filter_y_start);
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8* input_ptr =
              input_data + depth_base +
              depth * (in_x_origin +
                       input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++) {
            const uint8* input_row_ptr =
                input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++) {
              const uint8* input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              // Widen u8 -> u16 -> u32 and accumulate 16 (then 8) channels
              // at a time; the scalar loop below handles the remainder.
              for (; channel <= tranche_depth - 16; channel += 16) {
                uint16x4_t acc_reg[4];
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg[0] = vget_low_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[1] = vget_high_u16(vmovl_u8(vget_low_u8(input_reg)));
                acc_reg[2] = vget_low_u16(vmovl_u8(vget_high_u8(input_reg)));
                acc_reg[3] = vget_high_u16(vmovl_u8(vget_high_u8(input_reg)));
                for (int i = 0; i < 4; i++) {
                  vst1q_u32(
                      acc + channel + 4 * i,
                      vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
              for (; channel <= tranche_depth - 8; channel += 8) {
                uint16x4_t acc_reg[2];
                uint16x8_t input_reg = vmovl_u8(vld1_u8(input_channel_ptr));
                input_channel_ptr += 8;
                acc_reg[0] = vget_low_u16(input_reg);
                acc_reg[1] = vget_high_u16(input_reg);
                for (int i = 0; i < 2; i++) {
                  vst1q_u32(
                      acc + channel + 4 * i,
                      vaddw_u16(vld1q_u32(acc + channel + 4 * i), acc_reg[i]));
                }
              }
#endif
              for (; channel < tranche_depth; ++channel) {
                acc[channel] += *input_channel_ptr++;
              }
              input_row_ptr += depth;
            }
          }
          uint8* output_ptr = output_data + Offset(output_shape, batch, out_y,
                                                   out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
// Specialized rounding division for the common 3x3 and (3x5 / 5x3) filter
// sizes; lets the compiler turn the division by a constant into cheaper ops.
#define AVGPOOL_DIVIDING_BY(FILTER_COUNT)                               \
  if (filter_count == FILTER_COUNT) {                                   \
    for (; channel <= tranche_depth - 8; channel += 8) {                \
      uint16 buf[8];                                                    \
      for (int i = 0; i < 8; i++) {                                     \
        buf[i] = (acc[channel + i] + FILTER_COUNT / 2) / FILTER_COUNT;  \
      }                                                                 \
      uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));                      \
      buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max)); \
      buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min)); \
      vst1_u8(output_ptr + channel, buf8);                              \
    }                                                                   \
  }
          AVGPOOL_DIVIDING_BY(9)
          AVGPOOL_DIVIDING_BY(15)
#undef AVGPOOL_DIVIDING_BY
          // Generic vectorized rounding division for other filter sizes.
          for (; channel <= tranche_depth - 8; channel += 8) {
            uint16 buf[8];
            for (int i = 0; i < 8; i++) {
              buf[i] = (acc[channel + i] + filter_count / 2) / filter_count;
            }
            uint8x8_t buf8 = vqmovn_u16(vld1q_u16(buf));
            buf8 = vmin_u8(buf8, vdup_n_u8(params.quantized_activation_max));
            buf8 = vmax_u8(buf8, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, buf8);
          }
#endif
          // Scalar remainder: round-half-up division, then clamp.
          for (; channel < tranche_depth; ++channel) {
            uint16 a = (acc[channel] + filter_count / 2) / filter_count;
            a = std::max<uint16>(a, params.quantized_activation_min);
            a = std::min<uint16>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8>(a);
          }
        }
      }
    }
  }
}
// Max pooling over a float NHWC tensor.
//
// Written in "forward" (scatter) mode: each input pixel is visited once and
// max-combined into every output cell whose pooling window covers it. The
// channel dimension is handled as whole Eigen column vectors.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const float* input_data, const RuntimeShape& output_shape,
                    float* output_data) {
  ruy::profiler::ScopeLabel label("MaxPool");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // Prefill the output to minimum representable float value
  out_mat.setConstant(std::numeric_limits<float>::lowest());
  for (int b = 0; b < batches; ++b) {
    for (int h = 0; h < input_height; ++h) {
      for (int w = 0; w < input_width; ++w) {
        // (h_start, h_end) * (w_start, w_end) is the range that the input
        // vector projects to.
        int hpad = h + params.padding_values.height;
        int wpad = w + params.padding_values.width;
        int h_start = (hpad < params.filter_height)
                          ? 0
                          : (hpad - params.filter_height) / stride_height + 1;
        int h_end = std::min(hpad / stride_height + 1, output_height);
        int w_start = (wpad < params.filter_width)
                          ? 0
                          : (wpad - params.filter_width) / stride_width + 1;
        int w_end = std::min(wpad / stride_width + 1, output_width);
        // compute elementwise max
        for (int ph = h_start; ph < h_end; ++ph) {
          for (int pw = w_start; pw < w_end; ++pw) {
            int out_offset = NodeOffset(b, ph, pw, output_height, output_width);
            out_mat.col(out_offset) =
                out_mat.col(out_offset)
                    .cwiseMax(in_mat.col(
                        NodeOffset(b, h, w, input_height, input_width)));
          }
        }
      }
    }
  }
  // Apply the fused activation clamp to every output element.
  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i],
                                                  params.float_activation_min,
                                                  params.float_activation_max);
  }
}
// Max pooling over a uint8 NHWC tensor (quantized path).
//
// Iterates output positions directly ("gather" mode): for each output cell
// the clipped filter window is scanned and per-channel running maxima are
// kept in a small uint8 accumulator array, then clamped to the quantized
// activation range.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const uint8* input_data, const RuntimeShape& output_shape,
                    uint8* output_data) {
  ruy::profiler::ScopeLabel label("MaxPool/8bit");
  // Here, and in other pooling ops, in order to maintain locality of reference,
  // to minimize some recalculations, and to load into NEON vector registers, we
  // use an inner loop down the depth. Since depths can be large and hence we
  // would need arbitrarily large temporary storage, we divide the work up into
  // depth tranches just within the batch loop.
  static constexpr int kPoolingAccTrancheSize = 256;
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  // Running per-channel maxima for the current depth tranche. uint8 is
  // enough here (unlike AveragePool) because max never overflows.
  uint8 acc[kPoolingAccTrancheSize];
  for (int batch = 0; batch < batches; ++batch) {
    // We proceed through the depth in tranches (see comment above). The
    // depth_base is the depth at the beginning of the tranche. The
    // tranche_depth is the depth dimension of the tranche.
    for (int depth_base = 0; depth_base < depth;
         depth_base += kPoolingAccTrancheSize) {
      const int tranche_depth =
          std::min(depth - depth_base, kPoolingAccTrancheSize);
      for (int out_y = 0; out_y < output_height; ++out_y) {
        for (int out_x = 0; out_x < output_width; ++out_x) {
          // Top-left corner of the filter window in input coordinates
          // (may be negative due to padding).
          const int in_x_origin =
              (out_x * stride_width) - params.padding_values.width;
          const int in_y_origin =
              (out_y * stride_height) - params.padding_values.height;
          // Clip the filter window to the valid input region.
          const int filter_x_start = std::max(0, -in_x_origin);
          const int filter_x_end =
              std::min(params.filter_width, input_width - in_x_origin);
          const int filter_y_start = std::max(0, -in_y_origin);
          const int filter_y_end =
              std::min(params.filter_height, input_height - in_y_origin);
          // Start from 0, the smallest uint8, so the first real input wins.
          memset(acc, 0, tranche_depth * sizeof(acc[0]));
          const uint8* input_ptr =
              input_data + depth_base +
              depth * (in_x_origin +
                       input_width * (in_y_origin + input_height * batch));
          for (int fy = filter_y_start; fy < filter_y_end; fy++) {
            const uint8* input_row_ptr =
                input_ptr + depth * (fy * input_width + filter_x_start);
            for (int fx = filter_x_start; fx < filter_x_end; fx++) {
              const uint8* input_channel_ptr = input_row_ptr;
              int channel = 0;
#ifdef USE_NEON
              // Vectorized max over 16 (then 8) channels at a time; the
              // scalar loop below handles the remainder.
              for (; channel <= tranche_depth - 16; channel += 16) {
                uint8x16_t acc_reg = vld1q_u8(acc + channel);
                uint8x16_t input_reg = vld1q_u8(input_channel_ptr);
                input_channel_ptr += 16;
                acc_reg = vmaxq_u8(acc_reg, input_reg);
                vst1q_u8(acc + channel, acc_reg);
              }
              for (; channel <= tranche_depth - 8; channel += 8) {
                uint8x8_t acc_reg = vld1_u8(acc + channel);
                uint8x8_t input_reg = vld1_u8(input_channel_ptr);
                input_channel_ptr += 8;
                acc_reg = vmax_u8(acc_reg, input_reg);
                vst1_u8(acc + channel, acc_reg);
              }
#endif
              for (; channel < tranche_depth; ++channel) {
                acc[channel] = std::max(acc[channel], *input_channel_ptr++);
              }
              input_row_ptr += depth;
            }
          }
          uint8* output_ptr = output_data + Offset(output_shape, batch, out_y,
                                                   out_x, depth_base);
          int channel = 0;
#ifdef USE_NEON
          // Vectorized clamp-and-store of the accumulated maxima.
          for (; channel <= tranche_depth - 16; channel += 16) {
            uint8x16_t a = vld1q_u8(acc + channel);
            a = vminq_u8(a, vdupq_n_u8(params.quantized_activation_max));
            a = vmaxq_u8(a, vdupq_n_u8(params.quantized_activation_min));
            vst1q_u8(output_ptr + channel, a);
          }
          for (; channel <= tranche_depth - 8; channel += 8) {
            uint8x8_t a = vld1_u8(acc + channel);
            a = vmin_u8(a, vdup_n_u8(params.quantized_activation_max));
            a = vmax_u8(a, vdup_n_u8(params.quantized_activation_min));
            vst1_u8(output_ptr + channel, a);
          }
#endif
          // Scalar remainder: clamp to the quantized activation range.
          for (; channel < tranche_depth; ++channel) {
            uint8 a = acc[channel];
            a = std::max<uint8>(a, params.quantized_activation_min);
            a = std::min<uint8>(a, params.quantized_activation_max);
            output_ptr[channel] = static_cast<uint8>(a);
          }
        }
      }
    }
  }
}
// L2 pooling over a float NHWC tensor: each output is
// sqrt(mean(x^2)) over its pooling window.
inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
                   const float* input_data, const RuntimeShape& output_shape,
                   float* output_data) {
  ruy::profiler::ScopeLabel label("L2Pool");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  // Actually carry out L2 Pool. Code is written in forward mode: we go through
  // the input values once, and write to all the pooled regions that it maps to.
  const auto in_mat = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto out_mat = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // Scratch column holding the elementwise square of one input pixel.
  Eigen::VectorXf in_square(in_mat.rows());
  // out_count(i) = number of input pixels accumulated into output column i.
  Eigen::VectorXf out_count(out_mat.cols());
  out_count.setZero();
  // Prefill the output to 0.
  out_mat.setZero();
  for (int b = 0; b < batches; ++b) {
    for (int h = 0; h < input_height; ++h) {
      for (int w = 0; w < input_width; ++w) {
        // (h_start, h_end) * (w_start, w_end) is the range that the input
        // vector projects to.
        const int hpad = h + params.padding_values.height;
        const int wpad = w + params.padding_values.width;
        const int h_start =
            (hpad < params.filter_height)
                ? 0
                : (hpad - params.filter_height) / stride_height + 1;
        const int h_end = std::min(hpad / stride_height + 1, output_height);
        const int w_start =
            (wpad < params.filter_width)
                ? 0
                : (wpad - params.filter_width) / stride_width + 1;
        const int w_end = std::min(wpad / stride_width + 1, output_width);
        // pre-compute square
        const int in_offset = w + input_width * (h + input_height * b);
        in_square =
            in_mat.col(in_offset).array() * in_mat.col(in_offset).array();
        // compute elementwise sum of squares
        for (int ph = h_start; ph < h_end; ++ph) {
          for (int pw = w_start; pw < w_end; ++pw) {
            const int out_offset = pw + output_width * (ph + output_height * b);
            out_mat.col(out_offset) += in_square;
            out_count(out_offset)++;
          }
        }
      }
    }
  }
  // Turn sums of squares into sqrt(mean of squares).
  out_count = out_count.array().inverse();
  out_mat =
      (out_mat.array().rowwise() * out_count.transpose().array()).cwiseSqrt();
  // Apply the fused activation clamp to every output element.
  const int flat_size = output_shape.FlatSize();
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax(output_data[i],
                                                  params.float_activation_min,
                                                  params.float_activation_max);
  }
}
// Local response normalization (as in AlexNet), float path:
//   output = input / (bias + alpha * sum(input^2 over window))^beta
// where the sum runs over a window of +/- `range` along the innermost
// (depth) dimension.
inline void LocalResponseNormalization(
    const tflite::LocalResponseNormalizationParams& op_params,
    const RuntimeShape& input_shape, const float* input_data,
    const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("LocalResponseNormalization");
  MatchingFlatSize(input_shape, output_shape);
  const auto data_in = MapAsMatrixWithLastDimAsRows(input_data, input_shape);
  auto data_out = MapAsMatrixWithLastDimAsRows(output_data, output_shape);
  // Carry out local response normalization, vector by vector.
  // Since the data are stored column major, making row-wise operation
  // probably not memory efficient anyway, we do an explicit for loop over
  // the columns.
  const int double_range = op_params.range * 2;
  // Squared inputs with `range` zeros of padding on each side, so the
  // sliding window never needs boundary checks.
  Eigen::VectorXf padded_square(data_in.rows() + double_range);
  padded_square.setZero();
  const float bias = op_params.bias;
  for (int r = 0; r < data_in.cols(); ++r) {
    // Do local response normalization for data_in(:, r)
    // first, compute the square and store them in buffer for repeated use
    padded_square.block(op_params.range, 0, data_in.rows(), 1) =
        data_in.col(r).cwiseProduct(data_in.col(r)) * op_params.alpha;
    // Then, compute the scale and writes them to data_out
    // Sliding-window sum: seed with the first window, then add the entering
    // element and subtract the leaving one per step.
    float accumulated_scale = 0;
    for (int i = 0; i < double_range; ++i) {
      accumulated_scale += padded_square(i);
    }
    for (int i = 0; i < data_in.rows(); ++i) {
      accumulated_scale += padded_square(i + double_range);
      data_out(i, r) = bias + accumulated_scale;
      accumulated_scale -= padded_square(i);
    }
  }
  // In a few cases, the pow computation could benefit from speedups.
  if (op_params.beta == 1) {
    data_out.array() = data_in.array() * data_out.array().inverse();
  } else if (op_params.beta == 0.5f) {
    data_out.array() = data_in.array() * data_out.array().sqrt().inverse();
  } else {
    data_out.array() = data_in.array() * data_out.array().pow(-op_params.beta);
  }
}
// Float softmax over the batch slice [start_batch, end_batch).
//
// The input is viewed as a 2-D matrix with the logits (last dimension) as
// rows and batches as columns; softmax(x) = exp(beta * (x - max(x))) /
// sum(...) is computed column-by-column with Eigen.
inline void SoftmaxImpl(const SoftmaxParams& params,
                        const RuntimeShape& input_shape,
                        const float* input_data,
                        const RuntimeShape& output_shape, float* output_data,
                        int start_batch, int end_batch) {
  ruy::profiler::ScopeLabel label("Softmax/Impl");
  MatchingFlatSize(input_shape, output_shape);
  const int logit_size = input_shape.Dims(input_shape.DimensionsCount() - 1);
  const MatrixMap<const float> in_mat(input_data + logit_size * start_batch,
                                      logit_size, end_batch - start_batch);
  MatrixMap<float> out_mat(output_data + logit_size * start_batch, logit_size,
                           end_batch - start_batch);
  // Compute the exponential first, removing the max coefficient for numerical
  // stability.
  out_mat =
      (in_mat.rowwise() - in_mat.colwise().maxCoeff()).array() * params.beta;
  // We are separating out the exp function so that exp can be vectorized.
  out_mat = out_mat.array().exp();
  // Normalize to get the activations.
  Eigen::Array<float, 1, Eigen::Dynamic> scale =
      out_mat.array().colwise().sum().inverse();
  out_mat.array().rowwise() *= scale;
}
// Thread-pool task running SoftmaxImpl on a batch slice.
//
// NOTE: holds references to its constructor arguments; the params/shapes
// must outlive the task (they do in Softmax() below, which executes all
// tasks before returning).
struct SoftmaxWorkerTask : cpu_backend_threadpool::Task {
  SoftmaxWorkerTask(const SoftmaxParams& params,
                    const RuntimeShape& input_shape, const float* input_data,
                    const RuntimeShape& output_shape, float* output_data,
                    int start_batch, int end_batch)
      : params(params),
        input_shape(input_shape),
        input_data(input_data),
        output_shape(output_shape),
        output_data(output_data),
        start_batch(start_batch),
        end_batch(end_batch) {}
  // Invoked by the thread pool; computes softmax for this task's slice.
  void Run() override {
    SoftmaxImpl(params, input_shape, input_data, output_shape, output_data,
                start_batch, end_batch);
  }
 private:
  const tflite::SoftmaxParams& params;
  const RuntimeShape& input_shape;
  const float* input_data;
  const RuntimeShape& output_shape;
  float* output_data;
  int start_batch;   // first batch index (inclusive)
  int end_batch;     // last batch index (exclusive)
};
// Float softmax entry point: splits the batch dimension across the CPU
// backend thread pool (when available) and delegates to SoftmaxImpl.
inline void Softmax(const SoftmaxParams& params,
                    const RuntimeShape& input_shape, const float* input_data,
                    const RuntimeShape& output_shape, float* output_data,
                    CpuBackendContext* cpu_backend_context = nullptr) {
  ruy::profiler::ScopeLabel label("Softmax");
  // We picture softmax input as a 2-D matrix while the last dim is the logit
  // dim, and the rest dims will be the batch dim for the 2-D matrix.
  const int batch_size =
      FlatSizeSkipDim(input_shape, input_shape.DimensionsCount() - 1);
  // Keep at least kMinBatchPerThread batches per thread so the threading
  // overhead doesn't dominate small workloads.
  constexpr int kMinBatchPerThread = 8;
  int thread_count = batch_size / kMinBatchPerThread;
  thread_count = thread_count > 0 ? thread_count : 1;
  const int capped_thread_count =
      cpu_backend_context == nullptr
          ? 1
          : std::min(thread_count, cpu_backend_context->max_num_threads());
  if (capped_thread_count == 1) {
    // Single-threaded fast path (also taken when no backend context given).
    SoftmaxImpl(params, input_shape, input_data, output_shape, output_data, 0,
                batch_size);
  } else {
    std::vector<SoftmaxWorkerTask> tasks;
    // TODO(b/131746020) don't create new heap allocations every time.
    // At least we make it a single heap allocation by using reserve().
    tasks.reserve(capped_thread_count);
    int batch_start = 0;
    for (int i = 0; i < capped_thread_count; ++i) {
      // Try to distribute the tasks as even as possible.
      int batch_end =
          batch_start + (batch_size - batch_start) / (capped_thread_count - i);
      tasks.emplace_back(params, input_shape, input_data, output_shape,
                         output_data, batch_start, batch_end);
      batch_start = batch_end;
    }
    cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                    cpu_backend_context);
  }
}
// Quantizes a rescaled softmax probability: round to nearest integer and
// shift by the output zero point. Clamping to the output type's range is
// done by the caller.
template <typename T>
inline int32_t QuantizeSoftmaxOutput(float prob_rescaled, int32_t zero_point) {
  return static_cast<int32_t>(std::round(prob_rescaled)) + zero_point;
}
#if !__aarch64__
// With ARM64, rounding is faster than add + truncation.
// On other targets, for uint8 outputs (prob_rescaled >= 0), skip std::round
// and use the cheaper add-0.5-then-truncate, which is equivalent for
// non-negative values.
template <>
inline int32_t QuantizeSoftmaxOutput<uint8_t>(float prob_rescaled,
                                              int32_t zero_point) {
  return static_cast<int32_t>(prob_rescaled + 0.5f);
}
#endif
// Precomputes the softmax exp table for quantized uint8 inputs:
// table[i] = exp(-input_scale * beta * (255 - i)), so that during inference
// the exponent of a logit can be read with a single table lookup indexed
// relative to the row maximum.
inline void PopulateSoftmaxLookupTable(SoftmaxParams* data, float input_scale,
                                       float beta) {
  const float scale = -input_scale * beta;
  const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
  for (int32_t i = 0; i <= max_uint8; ++i) {
    // Entry i corresponds to the input value (max_uint8 - i).
    data->table[i] = expf(scale * (max_uint8 - i));
  }
}
// Quantized softmax using the float exp lookup table built by
// PopulateSoftmaxLookupTable. Works row by row over the trailing (logit)
// dimension; In/Out are the quantized input/output element types.
template <typename In, typename Out>
inline void Softmax(const SoftmaxParams& params,
                    const RuntimeShape& input_shape, const In* input_data,
                    const RuntimeShape& output_shape, Out* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int excluding_last_dim =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int last_dim =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  const int32_t clamp_max = std::numeric_limits<Out>::max();
  const int32_t clamp_min = std::numeric_limits<Out>::min();
  for (int i = 0; i < excluding_last_dim; ++i) {
    int32_t max_val = std::numeric_limits<In>::min();
    // Find max quantized value.
    for (int j = 0; j < last_dim; ++j) {
      max_val = std::max(max_val, static_cast<int32_t>(input_data[j]));
    }
    float sum_exp = 0.0f;
    const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
    // Shifted view into the table so that table_offset[input] yields
    // exp(scale * (input - max_val)), i.e. the max is subtracted for free.
    const float* table_offset = &params.table[max_uint8 - max_val];
    // Calculate normalizer sum(exp(x)).
    for (int j = 0; j < last_dim; ++j) {
      sum_exp += table_offset[input_data[j]];
    }
    const float inv_sum_exp = 1.0f / (sum_exp * params.scale);
    // Normalize and quantize probabilities.
    for (int j = 0; j < last_dim; ++j) {
      const float prob_rescaled = table_offset[input_data[j]] * inv_sum_exp;
      const int32_t prob_quantized =
          QuantizeSoftmaxOutput<Out>(prob_rescaled, params.zero_point);
      output_data[j] = static_cast<Out>(
          std::max(std::min(clamp_max, prob_quantized), clamp_min));
    }
    // Advance to the next row of logits.
    input_data += last_dim;
    output_data += last_dim;
  }
}
// Here's the softmax LUT optimization strategy:
// For softmax, we can do some mathematically equivalent transformation:
//
// softmax(x) = e^x / sum(e^x, 0...n) ===> equals to
// softmax(x) = e^(x - CONST) / sum(e^(x - CONST), 0...n)
//
// For quantization, `x` in our case is (input_q - input_zp) * input_s
// For uint8 case (int8 can be handled similarly), the range is [0, 255]
//
// so if we let
// CONST = (255 - input_zp) * input_s
// then we will have:
// softmax(x) = e^((input_q - 255) * input_s) --------- (1)
// /
// sum(e^(input_q - 255) * input_s, 0...n) -------- (2)
//
// the good thing about (1) is it's within the range of (0, 1), so we can
// approximate its result with uint16.
// (1) = uint8_out * 1 / 2^16.
//
// so (1) is lookup_uint8_table(input_q) * 1 / 2^16.
// then (2) is essentially the following:
// sum(lookup_uint8_table(input_q), 0...n) / 2^16.
//
// since (output_q - output_zp) * output_s = softmax(x)
// output_q = lookup_uint8_table(input_q)
//            /
//            (sum(lookup_uint8_table(input_q), 0...n) * output_s)
//            +
//            output_zp
//
// We can actually further improve the performance by using uint8 instead of
// uint16. But then we may lose some accuracy, so we need to pay attention
// to that.
// Builds the two uint8 lookup tables used by SoftmaxInt8LUT.
//
// For each possible shifted input value `val`, exp(scale * (val - 255)) is
// in (0, 1]; it is scaled to a uint16 fixed-point value and split into a
// high byte (uint8_table1) and a low byte (uint8_table2) so it can be looked
// up with byte-wide NEON table instructions.
inline void PopulateSoftmaxUInt8LookupTable(SoftmaxParams* data,
                                            float input_scale, float beta) {
  const float scale = input_scale * beta;
  const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
  const int32_t max_uint16 = std::numeric_limits<uint16_t>::max();
  for (int32_t val = 0; val <= max_uint8; ++val) {
    float input_to_exp = scale * (val - max_uint8);
    // Round to nearest and saturate at the uint16 maximum (val == 255
    // maps exactly to exp(0) * 65535 + 0.5, which would overflow).
    int32_t temp = static_cast<int>(expf(input_to_exp) * max_uint16 + 0.5);
    temp = std::min(max_uint16, temp);
    uint8_t part1 = temp >> 8;
    uint8_t part2 = temp & 0xff;
    data->uint8_table1[val] = static_cast<uint8_t>(part1);
    data->uint8_table2[val] = static_cast<uint8_t>(part2);
  }
}
// Returns the maximum of (input_data[j] ^ offset) over j in [0, size).
//
// `offset` is 0 for uint8 input and 0x80 for int8 input reinterpreted as
// uint8: XOR-ing with 0x80 maps the int8 values onto the unsigned ordering,
// so the maximum found here corresponds to the logical maximum.
// Returns 0 for size == 0.
//
// Fix: the NEON path used the legacy `int32` typedef while the scalar path
// used `int32_t`; normalized both to the standard `int32_t`.
inline int FindMaxValue(int size, const uint8_t* input_data, uint8_t offset) {
  int32_t max_val = std::numeric_limits<uint8_t>::min();
  int j = 0;
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  // Vectorized max over 16 lanes at a time; the scalar loop below handles
  // the remainder.
  uint8x16_t max_val_dup = vdupq_n_u8(max_val);
  uint8x16_t offset_dup = vdupq_n_u8(offset);
  for (; j <= size - 16; j += 16) {
    uint8x16_t input_value = vld1q_u8(input_data + j);
    input_value = veorq_u8(input_value, offset_dup);
    max_val_dup = vmaxq_u8(input_value, max_val_dup);
  }
  max_val = std::max(max_val, static_cast<int32_t>(vmaxvq_u8(max_val_dup)));
#endif
  for (; j < size; ++j) {
    max_val = std::max(max_val, static_cast<int32_t>(input_data[j] ^ offset));
  }
  return max_val;
}
#ifdef USE_NEON
// Value_to_store layout:
// [high_high, high_low, low_high, low_low].
// Saturating-narrows four int32x4 vectors down to 16 int8 lanes and stores
// them contiguously at `output`.
inline void StoreValue(int32x4x4_t value_to_store, int8_t* output) {
  const int16x8_t result_1 = vcombine_s16(vqmovn_s32(value_to_store.val[1]),
                                          vqmovn_s32(value_to_store.val[0]));
  const int16x8_t result_2 = vcombine_s16(vqmovn_s32(value_to_store.val[3]),
                                          vqmovn_s32(value_to_store.val[2]));
  const int8x16_t result =
      vcombine_s8(vqmovn_s16(result_2), vqmovn_s16(result_1));
  vst1q_s8(output, result);
}
// Value_to_store layout:
// [high_high, high_low, low_high, low_low].
// Same as above but saturating-narrows to 16 uint8 lanes; the int32 inputs
// are reinterpreted as uint32 before narrowing.
inline void StoreValue(int32x4x4_t value_to_store, uint8_t* output) {
  const uint16x8_t result_1 =
      vcombine_u16(vqmovn_u32(vreinterpretq_u32_s32(value_to_store.val[1])),
                   vqmovn_u32(vreinterpretq_u32_s32(value_to_store.val[0])));
  const uint16x8_t result_2 =
      vcombine_u16(vqmovn_u32(vreinterpretq_u32_s32(value_to_store.val[3])),
                   vqmovn_u32(vreinterpretq_u32_s32(value_to_store.val[2])));
  const uint8x16_t result =
      vcombine_u8(vqmovn_u16(result_2), vqmovn_u16(result_1));
  vst1q_u8(output, result);
}
#endif
// Quantized softmax using the split uint8 LUTs built by
// PopulateSoftmaxUInt8LookupTable (see the strategy comment above).
// In/Out may each be int8 or uint8; int8 inputs are handled by XOR-ing
// with 0x80 so all table indexing is unsigned.
template <typename In, typename Out>
inline void SoftmaxInt8LUT(const SoftmaxParams& params,
                           const RuntimeShape& input_shape,
                           const In* input_data,
                           const RuntimeShape& output_shape, Out* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int excluding_last_dim =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int last_dim =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  const int32_t clamp_max = std::numeric_limits<Out>::max();
  const int32_t clamp_min = std::numeric_limits<Out>::min();
  // Offset is used to interpret the input data "correctly".
  // If the input is uint8, the data will be unchanged.
  // If the input is int8, since it will be reinterpret as uint8.
  // e.g.,
  // int8 127 will be applied "offset" to become 255 in uint8.
  uint8_t offset = 0;
  if (std::is_same<In, int8>::value) {
    offset = 0x80;
  }
  const uint8_t* input_data_uint = reinterpret_cast<const uint8_t*>(input_data);
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
  // This code uses ARM64-only instructions.
  // TODO(b/143709993): Port to ARMv7
  // Load the tables into registers. (4*4 128-bit registers)
  uint8x16x4_t table1[4];
  table1[0] = vld1q_u8_x4(params.uint8_table1 + 16 * 4 * 0);
  table1[1] = vld1q_u8_x4(params.uint8_table1 + 16 * 4 * 1);
  table1[2] = vld1q_u8_x4(params.uint8_table1 + 16 * 4 * 2);
  table1[3] = vld1q_u8_x4(params.uint8_table1 + 16 * 4 * 3);
  uint8x16x4_t table2[4];
  table2[0] = vld1q_u8_x4(params.uint8_table2 + 16 * 4 * 0);
  table2[1] = vld1q_u8_x4(params.uint8_table2 + 16 * 4 * 1);
  table2[2] = vld1q_u8_x4(params.uint8_table2 + 16 * 4 * 2);
  table2[3] = vld1q_u8_x4(params.uint8_table2 + 16 * 4 * 3);
#endif
  // Process one row of logits at a time.
  for (int i = 0; i < excluding_last_dim; ++i) {
    // Find max quantized value.
    int32_t max_val = FindMaxValue(last_dim, input_data_uint, offset);
    int32 sum_exp = 0;
    const int32_t max_uint8 = std::numeric_limits<uint8_t>::max();
    // Adding table_offset to a (shifted) input re-centers it so the row max
    // maps to table entry 255, i.e. exp(0).
    const uint8_t table_offset = max_uint8 - max_val;
    // Calculate normalizer sum(exp(x)).
    int sum_j = 0;
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    uint8x16_t table_offset_dup = vdupq_n_u8(table_offset);
    uint8x16_t offset_dup = vdupq_n_u8(offset);
    uint32x4_t sum_4 = vdupq_n_u32(0);
    const int multiplier_shift = 8;
    for (; sum_j <= last_dim - 16; sum_j += 16) {
      uint8x16_t input_value = vld1q_u8(input_data_uint + sum_j);
      input_value = veorq_u8(input_value, offset_dup);
      input_value = vaddq_u8(input_value, table_offset_dup);
      // Reassemble the 16-bit exp values from the high/low byte tables:
      // exp = (table1 << 8) + table2.
      const uint8x16_t output1 = aarch64_lookup_vector(table1, input_value);
      const uint8x16_t output2 = aarch64_lookup_vector(table2, input_value);
      uint16x8_t exp_value1 =
          vshll_n_u8(vget_high_u8(output1), multiplier_shift);
      uint16x8_t exp_value2 =
          vshll_n_u8(vget_low_u8(output1), multiplier_shift);
      exp_value1 = vaddw_u8(exp_value1, vget_high_u8(output2));
      exp_value2 = vaddw_u8(exp_value2, vget_low_u8(output2));
      sum_4 = vpadalq_u16(sum_4, exp_value1);
      sum_4 = vpadalq_u16(sum_4, exp_value2);
    }
    int temp = vgetq_lane_u32(sum_4, 0) + vgetq_lane_u32(sum_4, 1) +
               vgetq_lane_u32(sum_4, 2) + vgetq_lane_u32(sum_4, 3);
    sum_exp += temp;
#endif
    for (; sum_j < last_dim; ++sum_j) {
      const uint8_t index = (input_data_uint[sum_j] ^ offset) + table_offset;
      uint8_t part1 = params.uint8_table1[index];
      uint8_t part2 = params.uint8_table2[index];
      sum_exp += ((part1 << 8) + part2);
    }
    // Convert the reciprocal of the sum to a fixed-point multiplier+shift
    // so the normalization below stays in integer arithmetic.
    const float inv_sum_exp = 1.0f / (sum_exp * params.scale);
    int32 multiplier, shift;
    QuantizeMultiplier(inv_sum_exp, &multiplier, &shift);
    // Normalize and quantize probabilities.
    int j = 0;
#ifdef TFLITE_SOFTMAX_USE_UINT16_LUT
    const int32x4_t output_zp_dup = vdupq_n_s32(params.zero_point);
    const int32x4_t max_val_dup = vdupq_n_s32(clamp_max);
    const int32x4_t min_val_dup = vdupq_n_s32(clamp_min);
    for (; j <= last_dim - 16; j += 16) {
      uint8x16_t input_value = vld1q_u8(input_data_uint + j);
      input_value = veorq_u8(input_value, offset_dup);
      input_value = vaddq_u8(input_value, table_offset_dup);
      const uint8x16_t output1 = aarch64_lookup_vector(table1, input_value);
      const uint8x16_t output2 = aarch64_lookup_vector(table2, input_value);
      uint16x8_t exp_value1 =
          vshll_n_u8(vget_high_u8(output1), multiplier_shift);
      uint16x8_t exp_value2 =
          vshll_n_u8(vget_low_u8(output1), multiplier_shift);
      exp_value1 = vaddw_u8(exp_value1, vget_high_u8(output2));
      exp_value2 = vaddw_u8(exp_value2, vget_low_u8(output2));
      int32x4x4_t output_value;
      output_value.val[0] =
          vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(exp_value1)));
      output_value.val[1] =
          vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(exp_value1)));
      output_value.val[2] =
          vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(exp_value2)));
      output_value.val[3] =
          vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(exp_value2)));
      int32x4x4_t temp_val =
          MultiplyByQuantizedMultiplier4Rows(output_value, multiplier, shift);
      temp_val.val[0] = vaddq_s32(temp_val.val[0], output_zp_dup);
      temp_val.val[1] = vaddq_s32(temp_val.val[1], output_zp_dup);
      temp_val.val[2] = vaddq_s32(temp_val.val[2], output_zp_dup);
      temp_val.val[3] = vaddq_s32(temp_val.val[3], output_zp_dup);
      temp_val.val[0] =
          vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);
      temp_val.val[1] =
          vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);
      temp_val.val[2] =
          vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup);
      temp_val.val[3] =
          vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup);
      StoreValue(temp_val, output_data + j);
    }
#endif
    // Scalar remainder: rescale each exp value, add the output zero point,
    // and clamp to the Out range.
    for (; j < last_dim; ++j) {
      const uint8_t index = (input_data_uint[j] ^ offset) + table_offset;
      const uint8_t part1 = params.uint8_table1[index];
      const uint8_t part2 = params.uint8_table2[index];
      const int32_t exp_value = (part1 << 8) + part2;
      const int32_t output_value =
          MultiplyByQuantizedMultiplier(exp_value, multiplier, shift);
      output_data[j] = static_cast<Out>(std::max(
          std::min(clamp_max, output_value + params.zero_point), clamp_min));
    }
    input_data_uint += last_dim;
    output_data += last_dim;
  }
}
// Float log-softmax over the trailing dimension:
//   log_softmax(x) = (x - max) - log(sum(exp(x - max)))
// computed row by row with Eigen. `params` is unused in the float path.
inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape, const float* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("LogSoftmax");
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int i = 0; i < outer_size; ++i) {
    VectorMap<const float> block_input(input_data + i * depth, depth, 1);
    VectorMap<float> block_output(output_data + i * depth, depth, 1);
    // Find max element value which we'll use to ensure numerical stability
    // taking advantage of the following equality:
    // log(exp(x[i])/sum(exp(x[i]))) == log(exp(x[i]+C)/sum(exp(x[i]+C)))
    const float max = block_input.maxCoeff();
    const float log_sum = std::log((block_input.array() - max).exp().sum());
    block_output = block_input.array() - max - log_sum;
  }
}
// Backwards compatibility. Less optimized than below version.
inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape, const uint8* input_data,
                       const RuntimeShape& output_shape, uint8* output_data) {
  // No optimized uint8 path here; defer to the reference kernel.
  reference_ops::LogSoftmax(params, input_shape, input_data, output_shape,
                            output_data);
}
// Compute LogSoftmax as (x - x_max) - ln(sum(e^(x_i - x_max)...)
// as done in tf.nn.log_softmax to prevent underflow and overflow.
// This is in contrast to just log(softmax(x))
//
// To handle quantization, first dequantize the inputs (from doing
// e^(input scale * val) where we ignore the zero point since it cancels
// out during subtraction due to the ln) and do a rescale at the end to int8.
//
// Notably this makes use of float and is intended as the optimized
// form for quantized execution on CPU. For a fully integer version,
// see the reference op.
//
// TODO(tflite): notes for optimization:
// 1) See if e^ is also bottleneck in the reference fully-integer
// version and apply lookup there and compare.
template <typename T>
inline void LogSoftmax(const SoftmaxParams& params, float input_scale,
const RuntimeShape& input_shape, const T* input_data,
const RuntimeShape& output_shape, T* output_data) {
ruy::profiler::ScopeLabel label("LogSoftmax");
const int trailing_dim = input_shape.DimensionsCount() - 1;
const int excluding_last_dim =
MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
const int last_dim =
MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
const int32_t clamp_max = std::numeric_limits<T>::max();
const int32_t clamp_min = std::numeric_limits<T>::min();
for (int i = 0; i < excluding_last_dim; ++i) {
T max_val = std::numeric_limits<T>::min();
// Find max quantized value.
for (int j = 0; j < last_dim; ++j) {
max_val = std::max(max_val, input_data[j]);
}
float sum_exp = 0.0f;
const int32_t max_uint8 = std::numeric_limits<uint8>::max();
// Offset into table to compute exp(scale*(x - xmax)) instead of
// exp(scale*(x)) to prevent overflow.
const float* table_offset = ¶ms.table[max_uint8 - max_val];
// Calculate sum(exp(scale*(x - x_max))).
for (int j = 0; j < last_dim; ++j) {
sum_exp += table_offset[input_data[j]];
}
const float log_sum_exp = std::log(sum_exp);
// params.scale is the output scale.
const float scale = input_scale / params.scale;
const float precomputed =
(input_scale * max_val + log_sum_exp) / params.scale;
for (int j = 0; j < last_dim; ++j) {
// Equivalent to (input_scale * (input_data[j] - max_val) - log_sum_exp) /
// output_scale.
const float log_prob = scale * input_data[j] - precomputed;
// TODO(tflite): look into better solution.
// Use std::rint over std::round (which is used in
// FakeQuant) since it's multiple times faster on tested arm32.
const int32_t prob_quantized = std::rint(log_prob) + params.zero_point;
output_data[j] = static_cast<T>(
std::max(std::min(clamp_max, prob_quantized), clamp_min));
}
input_data += last_dim;
output_data += last_dim;
}
}
// Elementwise logistic (sigmoid) over a float tensor, vectorized via Eigen.
inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
                     const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Logistic");
  auto in_vec = MapAsVector(input_data, input_shape);
  auto out_vec = MapAsVector(output_data, output_shape);
  out_vec.array() =
      in_vec.array().unaryExpr(Eigen::internal::scalar_logistic_op<float>());
}
// Convenience version that allows, for example, generated-code calls to be
// uniform between data types.
inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape,
                     const float* input_data, const RuntimeShape& output_shape,
                     float* output_data) {
  // The params argument carries nothing the float path needs; forward
  // without it.
  Logistic(input_shape, input_data, output_shape, output_data);
}
// Fixed-point int16 logistic (sigmoid).
// The input is interpreted as fixed point with 3 integer bits (range
// [-8, 8]); the output has 0 integer bits (range [-1, 1]).
// SIMD paths (NEON / SSE4) handle 16 and then 8 lanes per iteration; the
// scalar fixed-point loop finishes any remainder.
// Fix: removed a dead, empty `for (int i = 0; i < flat_size; i++) {}` loop
// that burned O(flat_size) iterations doing nothing.
inline void Logistic(const LogisticParams& params,
                     const RuntimeShape& input_shape, const int16* input_data,
                     const RuntimeShape& output_shape, int16* output_data) {
  ruy::profiler::ScopeLabel label("Logistic/Int16");
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int c = 0;
  const int16* input_data_ptr = input_data;
  int16* output_data_ptr = output_data;
#ifdef GEMMLOWP_NEON
  {
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<int16x8_t, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<int16x8_t, 3>;
    // Two vectors (16 lanes) at a time.
    for (; c <= flat_size - 16; c += 16) {
      F3 input0 = F3::FromRaw(vld1q_s16(input_data_ptr));
      F3 input1 = F3::FromRaw(vld1q_s16(input_data_ptr + 8));
      F0 output0 = gemmlowp::logistic(input0);
      F0 output1 = gemmlowp::logistic(input1);
      vst1q_s16(output_data_ptr, output0.raw());
      vst1q_s16(output_data_ptr + 8, output1.raw());
      input_data_ptr += 16;
      output_data_ptr += 16;
    }
    // One vector (8 lanes) at a time.
    for (; c <= flat_size - 8; c += 8) {
      F3 input = F3::FromRaw(vld1q_s16(input_data_ptr));
      F0 output = gemmlowp::logistic(input);
      vst1q_s16(output_data_ptr, output.raw());
      input_data_ptr += 8;
      output_data_ptr += 8;
    }
  }
#endif
#ifdef GEMMLOWP_SSE4
  {
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<gemmlowp::int16x8_m128i, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<gemmlowp::int16x8_m128i, 3>;
    for (; c <= flat_size - 16; c += 16) {
      F3 input0 = F3::FromRaw(gemmlowp::to_int16x8_m128i(
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(input_data_ptr))));
      F3 input1 = F3::FromRaw(gemmlowp::to_int16x8_m128i(_mm_loadu_si128(
          reinterpret_cast<const __m128i*>(input_data_ptr + 8))));
      F0 output0 = gemmlowp::logistic(input0);
      F0 output1 = gemmlowp::logistic(input1);
      _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                       output0.raw().v);
      _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr + 8),
                       output1.raw().v);
      input_data_ptr += 16;
      output_data_ptr += 16;
    }
    for (; c <= flat_size - 8; c += 8) {
      F3 input = F3::FromRaw(gemmlowp::to_int16x8_m128i(
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(input_data_ptr))));
      F0 output = gemmlowp::logistic(input);
      _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                       output.raw().v);
      input_data_ptr += 8;
      output_data_ptr += 8;
    }
  }
#endif
  {
    // Scalar fixed-point tail.
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
    for (; c < flat_size; ++c) {
      F3 input = F3::FromRaw(*input_data_ptr);
      F0 output = gemmlowp::logistic(input);
      *output_data_ptr = output.raw();
      ++input_data_ptr;
      ++output_data_ptr;
    }
  }
}
// Elementwise tanh over a float tensor, vectorized via Eigen.
inline void Tanh(const RuntimeShape& input_shape, const float* input_data,
                 const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Tanh");
  auto in_vec = MapAsVector(input_data, input_shape);
  auto out_vec = MapAsVector(output_data, output_shape);
  out_vec.array() = in_vec.array().tanh();
}
// Convenience version that allows, for example, generated-code calls to be
// uniform between data types.
inline void Tanh(const TanhParams&, const RuntimeShape& input_shape,
                 const float* input_data, const RuntimeShape& output_shape,
                 float* output_data) {
  // The params argument carries nothing the float path needs; forward
  // without it.
  Tanh(input_shape, input_data, output_shape, output_data);
}
// Fixed-point int16 tanh.
// The input is interpreted as fixed point with 3 integer bits (range
// [-8, 8]) after an optional pre-shift of params.input_left_shift (0 or 1
// bit); the output has 0 integer bits (range [-1, 1]).
// SIMD paths (NEON / SSE4) handle 16 and then 8 lanes per iteration; the
// scalar fixed-point loop finishes any remainder.
inline void Tanh(const TanhParams& params, const RuntimeShape& input_shape,
                 const int16* input_data, const RuntimeShape& output_shape,
                 int16* output_data) {
  ruy::profiler::ScopeLabel label("Tanh/Int16");
  const int input_left_shift = params.input_left_shift;
  // Support for shifts is limited until we have a parameterized version of
  // SaturatingRoundingMultiplyByPOT().
  TFLITE_DCHECK_GE(input_left_shift, 0);
  TFLITE_DCHECK_LE(input_left_shift, 1);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int c = 0;
  const int16* input_data_ptr = input_data;
  int16* output_data_ptr = output_data;
#ifdef GEMMLOWP_NEON
  {
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<int16x8_t, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<int16x8_t, 3>;
    // Branch once on the shift, not per element.
    if (input_left_shift == 0) {
      for (; c <= flat_size - 16; c += 16) {
        F3 input0 = F3::FromRaw(vld1q_s16(input_data_ptr));
        F3 input1 = F3::FromRaw(vld1q_s16(input_data_ptr + 8));
        F0 output0 = gemmlowp::tanh(input0);
        F0 output1 = gemmlowp::tanh(input1);
        vst1q_s16(output_data_ptr, output0.raw());
        vst1q_s16(output_data_ptr + 8, output1.raw());
        input_data_ptr += 16;
        output_data_ptr += 16;
      }
      for (; c <= flat_size - 8; c += 8) {
        F3 input = F3::FromRaw(vld1q_s16(input_data_ptr));
        F0 output = gemmlowp::tanh(input);
        vst1q_s16(output_data_ptr, output.raw());
        input_data_ptr += 8;
        output_data_ptr += 8;
      }
    } else {
      // input_left_shift == 1: saturating multiply-by-2 before tanh.
      for (; c <= flat_size - 16; c += 16) {
        F3 input0 = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            vld1q_s16(input_data_ptr)));
        F3 input1 = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            vld1q_s16(input_data_ptr + 8)));
        F0 output0 = gemmlowp::tanh(input0);
        F0 output1 = gemmlowp::tanh(input1);
        vst1q_s16(output_data_ptr, output0.raw());
        vst1q_s16(output_data_ptr + 8, output1.raw());
        input_data_ptr += 16;
        output_data_ptr += 16;
      }
      for (; c <= flat_size - 8; c += 8) {
        F3 input = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            vld1q_s16(input_data_ptr)));
        F0 output = gemmlowp::tanh(input);
        vst1q_s16(output_data_ptr, output.raw());
        input_data_ptr += 8;
        output_data_ptr += 8;
      }
    }
  }
#endif
#ifdef GEMMLOWP_SSE4
  {
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<gemmlowp::int16x8_m128i, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<gemmlowp::int16x8_m128i, 3>;
    if (input_left_shift == 0) {
      for (; c <= flat_size - 16; c += 16) {
        F3 input0 = F3::FromRaw(gemmlowp::to_int16x8_m128i(
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(input_data_ptr))));
        F3 input1 = F3::FromRaw(gemmlowp::to_int16x8_m128i(_mm_loadu_si128(
            reinterpret_cast<const __m128i*>(input_data_ptr + 8))));
        F0 output0 = gemmlowp::tanh(input0);
        F0 output1 = gemmlowp::tanh(input1);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                         output0.raw().v);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr + 8),
                         output1.raw().v);
        input_data_ptr += 16;
        output_data_ptr += 16;
      }
      for (; c <= flat_size - 8; c += 8) {
        F3 input = F3::FromRaw(gemmlowp::to_int16x8_m128i(
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(input_data_ptr))));
        F0 output = gemmlowp::tanh(input);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                         output.raw().v);
        input_data_ptr += 8;
        output_data_ptr += 8;
      }
    } else {
      // input_left_shift == 1: saturating multiply-by-2 before tanh.
      for (; c <= flat_size - 16; c += 16) {
        F3 input0 = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            gemmlowp::to_int16x8_m128i(_mm_loadu_si128(
                reinterpret_cast<const __m128i*>(input_data_ptr)))));
        F3 input1 = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            gemmlowp::to_int16x8_m128i(_mm_loadu_si128(
                reinterpret_cast<const __m128i*>(input_data_ptr + 8)))));
        F0 output0 = gemmlowp::tanh(input0);
        F0 output1 = gemmlowp::tanh(input1);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                         output0.raw().v);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr + 8),
                         output1.raw().v);
        input_data_ptr += 16;
        output_data_ptr += 16;
      }
      for (; c <= flat_size - 8; c += 8) {
        F3 input = F3::FromRaw(gemmlowp::SaturatingRoundingMultiplyByPOT<1>(
            gemmlowp::to_int16x8_m128i(_mm_loadu_si128(
                reinterpret_cast<const __m128i*>(input_data_ptr)))));
        F0 output = gemmlowp::tanh(input);
        _mm_storeu_si128(reinterpret_cast<__m128i*>(output_data_ptr),
                         output.raw().v);
        input_data_ptr += 8;
        output_data_ptr += 8;
      }
    }
  }
#endif
  {
    // Scalar fixed-point tail for elements not covered by a SIMD path.
    // F0 uses 0 integer bits, range [-1, 1].
    // This is the return type of math functions such as tanh, logistic,
    // whose range is in [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    // F3 uses 3 integer bits, range [-8, 8], the input range expected here.
    using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
    if (input_left_shift == 0) {
      for (; c < flat_size; ++c) {
        F3 input = F3::FromRaw(*input_data_ptr);
        F0 output = gemmlowp::tanh(input);
        *output_data_ptr = output.raw();
        ++input_data_ptr;
        ++output_data_ptr;
      }
    } else {
      for (; c < flat_size; ++c) {
        F3 input = F3::FromRaw(
            gemmlowp::SaturatingRoundingMultiplyByPOT<1>(*input_data_ptr));
        F0 output = gemmlowp::tanh(input);
        *output_data_ptr = output.raw();
        ++input_data_ptr;
        ++output_data_ptr;
      }
    }
  }
}
// Elementwise static_cast from SrcT to DstT across the whole tensor.
template <typename SrcT, typename DstT>
inline void Cast(const RuntimeShape& input_shape, const SrcT* input_data,
                 const RuntimeShape& output_shape, DstT* output_data) {
  ruy::profiler::ScopeLabel label("Cast");
  auto src_vec = MapAsVector(input_data, input_shape);
  auto dst_vec = MapAsVector(output_data, output_shape);
  dst_vec.array() = src_vec.array().template cast<DstT>();
}
// Elementwise floor over a float tensor, vectorized via Eigen.
inline void Floor(const RuntimeShape& input_shape, const float* input_data,
                  const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Floor");
  auto in_vec = MapAsVector(input_data, input_shape);
  auto out_vec = MapAsVector(output_data, output_shape);
  out_vec.array() = Eigen::floor(in_vec.array());
}
// Elementwise ceil over a float tensor, vectorized via Eigen.
inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
                 const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Ceil");
  auto in_vec = MapAsVector(input_data, input_shape);
  auto out_vec = MapAsVector(output_data, output_shape);
  out_vec.array() = Eigen::ceil(in_vec.array());
}
// Helper methods for BatchToSpaceND.
// `spatial_index_dim` specifies post-crop offset index in this spatial
// dimension, i.e. spatial offset introduced by flattening batch to spatial
// dimension minus the crop size at beginning. `block_shape_dim` is the block
// size in current dimension. `input_dim` and `output_dim` are input and output
// size of BatchToSpaceND operation in current dimension.
// Output start index is inclusive and end index is exclusive.
inline void GetIndexRange(int spatial_index_dim, int block_shape_dim,
                          int input_dim, int output_dim, int* start_index,
                          int* end_index) {
  // Ceiling division rounds (*start_index) * block_shape_dim up to the
  // next multiple of block_shape_dim; clamp below at zero.
  const int first_index =
      (-spatial_index_dim + block_shape_dim - 1) / block_shape_dim;
  *start_index = std::max(0, first_index);
  // end_index is exclusive; its product with block_shape_dim is likewise
  // rounded up by the ceiling division, and clamped above by input_dim.
  const int last_index =
      (output_dim - spatial_index_dim + block_shape_dim - 1) / block_shape_dim;
  *end_index = std::min(input_dim, last_index);
}
// BatchToSpaceND: redistributes the batch dimension of the input into the
// spatial dimensions (by `block_shape_data`), then applies the crops in
// `crops_data`. Supports 3D (NHC) and 4D (NHWC) inputs; 3D is internally
// extended to NH1C.
template <typename T>
inline void BatchToSpaceND(
    const RuntimeShape& unextended_input1_shape, const T* input1_data,
    const RuntimeShape& unextended_input2_shape, const int32* block_shape_data,
    const RuntimeShape& unextended_input3_shape, const int32* crops_data,
    const RuntimeShape& unextended_output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("BatchToSpaceND");
  TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3);
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(),
                   unextended_output_shape.DimensionsCount());
  // Extends the input/output shape from 3D to 4D if needed, NHC -> NH1C.
  auto extend_shape = [](const RuntimeShape& shape) {
    if (shape.DimensionsCount() == 4) {
      return shape;
    }
    RuntimeShape new_shape(4, 1);
    new_shape.SetDim(0, shape.Dims(0));
    new_shape.SetDim(1, shape.Dims(1));
    new_shape.SetDim(3, shape.Dims(2));
    return new_shape;
  };
  const RuntimeShape input1_shape = extend_shape(unextended_input1_shape);
  const RuntimeShape output_shape = extend_shape(unextended_output_shape);
  const int output_width = output_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_batch_size = output_shape.Dims(0);
  const int depth = input1_shape.Dims(3);
  const int input_width = input1_shape.Dims(2);
  const int input_height = input1_shape.Dims(1);
  const int input_batch_size = input1_shape.Dims(0);
  const int block_shape_height = block_shape_data[0];
  // For the 3D case there is only one spatial block dimension; width
  // block size and left crop default to the identity values.
  const int block_shape_width =
      unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1;
  const int crops_top = crops_data[0];
  const int crops_left =
      unextended_input1_shape.DimensionsCount() == 4 ? crops_data[2] : 0;
  for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) {
    // Each input batch maps to output batch (in_batch % output_batch_size)
    // at spatial block offset (in_batch / output_batch_size).
    const int out_batch = in_batch % output_batch_size;
    const int spatial_offset = in_batch / output_batch_size;
    int in_h_start = 0;
    int in_h_end = 0;
    // GetIndexRange ensures start and end indices are in [0, output_height).
    GetIndexRange(spatial_offset / block_shape_width - crops_top,
                  block_shape_height, input_height, output_height, &in_h_start,
                  &in_h_end);
    for (int in_h = in_h_start; in_h < in_h_end; ++in_h) {
      const int out_h = in_h * block_shape_height +
                        spatial_offset / block_shape_width - crops_top;
      TFLITE_DCHECK_GE(out_h, 0);
      TFLITE_DCHECK_LT(out_h, output_height);
      int in_w_start = 0;
      int in_w_end = 0;
      // GetIndexRange ensures start and end indices are in [0, output_width).
      GetIndexRange(spatial_offset % block_shape_width - crops_left,
                    block_shape_width, input_width, output_width, &in_w_start,
                    &in_w_end);
      for (int in_w = in_w_start; in_w < in_w_end; ++in_w) {
        const int out_w = in_w * block_shape_width +
                          spatial_offset % block_shape_width - crops_left;
        TFLITE_DCHECK_GE(out_w, 0);
        TFLITE_DCHECK_LT(out_w, output_width);
        // Depth is contiguous for both shapes, so copy a whole pixel at once.
        T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0);
        const T* in =
            input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);
        memcpy(out, in, depth * sizeof(T));
      }
    }
  }
}
// Fills `num` elements of type T starting at `ptr` with `value`.
// A plain memset is only byte-accurate for zero fills or single-byte
// element types; otherwise each element is written with memcpy.
template <typename T>
void TypedMemset(void* ptr, T value, size_t num) {
  const bool memset_is_exact = (value == 0 || std::is_same<T, uint8_t>::value);
  if (memset_is_exact) {
    memset(ptr, value, num * sizeof(T));
    return;
  }
  // General path: replicate the element bytes one element at a time.
  char* dst = static_cast<char*>(ptr);
  for (size_t i = 0; i < num; ++i) {
    memcpy(dst, &value, sizeof(T));
    dst += sizeof(T);
  }
}
// This makes heavy use of Offset, along with conditional branches. There may be
// opportunities for improvement.
//
// There are two versions of pad: Pad and PadV2. In PadV2 there is a second
// scalar input that provides the padding value. Therefore pad_value_ptr can be
// equivalent to a simple input1_data. For Pad, it should point to a zero
// value.
//
// Note that two typenames are required, so that T=P=int32 is considered a
// specialization distinct from P=int32.
template <typename T, typename P>
inline void PadImpl(const tflite::PadParams& op_params,
                    const RuntimeShape& input_shape, const T* input_data,
                    const P* pad_value_ptr, const RuntimeShape& output_shape,
                    T* output_data) {
  ruy::profiler::ScopeLabel label("PadImpl");
  // All shapes and paddings are normalized to 5 dimensions
  // (batch, s1, s2, s3, channel) so one loop nest handles every rank.
  const int max_supported_dims = 5;
  const RuntimeShape ext_input_shape =
      RuntimeShape::ExtendedShape(max_supported_dims, input_shape);
  const RuntimeShape ext_output_shape =
      RuntimeShape::ExtendedShape(max_supported_dims, output_shape);
  TFLITE_DCHECK_LE(op_params.left_padding_count, max_supported_dims);
  TFLITE_DCHECK_LE(op_params.right_padding_count, max_supported_dims);
  // Pad kernels are limited to max 4 dimensions. Copy inputs so we can pad them
  // to 4 dims (yes, we are "padding the padding").
  std::vector<int> left_padding_copy(max_supported_dims, 0);
  const int left_padding_extend =
      max_supported_dims - op_params.left_padding_count;
  for (int i = 0; i < op_params.left_padding_count; ++i) {
    left_padding_copy[left_padding_extend + i] = op_params.left_padding[i];
  }
  std::vector<int> right_padding_copy(max_supported_dims, 0);
  const int right_padding_extend =
      max_supported_dims - op_params.right_padding_count;
  for (int i = 0; i < op_params.right_padding_count; ++i) {
    right_padding_copy[right_padding_extend + i] = op_params.right_padding[i];
  }
  const int output_batch = ext_output_shape.Dims(0);
  const int output_spatial_dim1 = ext_output_shape.Dims(1);
  const int output_spatial_dim2 = ext_output_shape.Dims(2);
  const int output_spatial_dim3 = ext_output_shape.Dims(3);
  const int output_channel = ext_output_shape.Dims(4);
  const int left_b_padding = left_padding_copy[0];
  const int left_s1_padding = left_padding_copy[1];
  const int left_s2_padding = left_padding_copy[2];
  const int left_s3_padding = left_padding_copy[3];
  const int left_c_padding = left_padding_copy[4];
  const int right_b_padding = right_padding_copy[0];
  const int right_s1_padding = right_padding_copy[1];
  const int right_s2_padding = right_padding_copy[2];
  const int right_s3_padding = right_padding_copy[3];
  const int right_c_padding = right_padding_copy[4];
  const int input_depth = ext_input_shape.Dims(4);
  const T pad_value = *pad_value_ptr;
  // At each nesting level: fill the whole left margin of that dimension in
  // one TypedMemset, recurse over the interior, then fill the right margin.
  if (left_b_padding != 0) {
    TypedMemset<T>(output_data, pad_value,
                   left_b_padding * output_spatial_dim1 * output_spatial_dim2 *
                       output_spatial_dim3 * output_channel);
  }
  for (int out_b = left_b_padding; out_b < output_batch - right_b_padding;
       ++out_b) {
    if (left_s1_padding != 0) {
      TypedMemset<T>(output_data + Offset(ext_output_shape, out_b, 0, 0, 0, 0),
                     pad_value,
                     left_s1_padding * output_spatial_dim2 *
                         output_spatial_dim3 * output_channel);
    }
    for (int out_p = left_s1_padding;
         out_p < output_spatial_dim1 - right_s1_padding; ++out_p) {
      if (left_s2_padding != 0) {
        TypedMemset<T>(
            output_data + Offset(ext_output_shape, out_b, out_p, 0, 0, 0),
            pad_value, left_s2_padding * output_spatial_dim3 * output_channel);
      }
      for (int out_h = left_s2_padding;
           out_h < output_spatial_dim2 - right_s2_padding; ++out_h) {
        if (left_s3_padding != 0) {
          TypedMemset<T>(
              output_data + Offset(ext_output_shape, out_b, out_p, out_h, 0, 0),
              pad_value, left_s3_padding * output_channel);
        }
        for (int out_w = left_s3_padding;
             out_w < output_spatial_dim3 - right_s3_padding; ++out_w) {
          if (left_c_padding != 0) {
            TypedMemset<T>(output_data + Offset(ext_output_shape, out_b, out_p,
                                                out_h, out_w, 0),
                           pad_value, left_c_padding);
          }
          // Innermost: copy one full run of input channels.
          T* out = output_data + Offset(ext_output_shape, out_b, out_p, out_h,
                                        out_w, left_c_padding);
          const T* in = input_data +
                        Offset(ext_input_shape, out_b - left_b_padding,
                               out_p - left_s1_padding, out_h - left_s2_padding,
                               out_w - left_s3_padding, 0);
          memcpy(out, in, input_depth * sizeof(T));
          if (right_c_padding != 0) {
            TypedMemset<T>(
                output_data + Offset(ext_output_shape, out_b, out_p, out_h,
                                     out_w, output_channel - right_c_padding),
                pad_value, right_c_padding);
          }
        }
        if (right_s3_padding != 0) {
          TypedMemset<T>(
              output_data + Offset(ext_output_shape, out_b, out_p, out_h,
                                   output_spatial_dim3 - right_s3_padding, 0),
              pad_value, right_s3_padding * output_channel);
        }
      }
      if (right_s2_padding != 0) {
        TypedMemset<T>(
            output_data + Offset(ext_output_shape, out_b, out_p,
                                 output_spatial_dim2 - right_s2_padding, 0, 0),
            pad_value, right_s2_padding * output_spatial_dim3 * output_channel);
      }
    }
    if (right_s1_padding != 0) {
      TypedMemset<T>(
          output_data + Offset(ext_output_shape, out_b,
                               output_spatial_dim1 - right_s1_padding, 0, 0, 0),
          pad_value,
          right_s1_padding * output_spatial_dim2 * output_spatial_dim3 *
              output_channel);
    }
  }
  if (right_b_padding != 0) {
    TypedMemset<T>(
        output_data + Offset(ext_output_shape, output_batch - right_b_padding,
                             0, 0, 0, 0),
        pad_value,
        right_b_padding * output_spatial_dim1 * output_spatial_dim2 *
            output_spatial_dim3 * output_channel);
  }
}
// Thin wrapper: dispatches to PadImpl with identical arguments.
template <typename T, typename P>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const P* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}
// The second (pad-value) input can be int32 when, say, the first is uint8.
template <typename T>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const int32* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  // Narrow the pad value to the tensor element type before delegating.
  const T typed_pad_value = static_cast<T>(*pad_value_ptr);
  PadImpl(op_params, input_shape, input_data, &typed_pad_value, output_shape,
          output_data);
}
// This version avoids conflicting template matching: when T and the pad
// value are both int32, forward the pointer through unchanged.
template <>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const int32* input_data,
                const int32* pad_value_ptr, const RuntimeShape& output_shape,
                int32* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}
// TODO(b/117643175): Optimize. (This is an introductory copy of standard Pad.)
//
// This pad requires that (a) left and right paddings are in the 4D patterns
// {0, h_pad, w_pad, 0}, and (b) memset can be used: *pad_value_ptr == 0 and/or
// T is uint8.
//
// There are two versions of pad: Pad and PadV2. In PadV2 there is a second
// scalar input that provides the padding value. Therefore pad_value_ptr can be
// equivalent to a simple input1_data. For Pad, it should point to a zero
// value.
//
// Note that two typenames are required, so that T=P=int32 is considered a
// specialization distinct from P=int32.
template <typename T, typename P>
inline void PadImageStyleMemset(const tflite::PadParams& op_params,
                                const RuntimeShape& input_shape,
                                const T* input_data, const P* pad_value_ptr,
                                const RuntimeShape& output_shape,
                                T* output_data) {
  ruy::profiler::ScopeLabel label("PadImageStyle");
  const RuntimeShape ext_input_shape =
      RuntimeShape::ExtendedShape(4, input_shape);
  const RuntimeShape ext_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);
  TFLITE_DCHECK_LE(op_params.left_padding_count, 4);
  TFLITE_DCHECK_LE(op_params.right_padding_count, 4);
  // Pad kernels are limited to max 4 dimensions. Copy inputs so we can pad them
  // to 4 dims (yes, we are "padding the padding").
  std::vector<int> left_padding_copy(4, 0);
  const int left_padding_extend = 4 - op_params.left_padding_count;
  for (int i = 0; i < op_params.left_padding_count; ++i) {
    left_padding_copy[left_padding_extend + i] = op_params.left_padding[i];
  }
  std::vector<int> right_padding_copy(4, 0);
  const int right_padding_extend = 4 - op_params.right_padding_count;
  for (int i = 0; i < op_params.right_padding_count; ++i) {
    right_padding_copy[right_padding_extend + i] = op_params.right_padding[i];
  }
  // The following padding restrictions are contractual requirements, and
  // embody what it means for a padding op to be "image-style".
  TFLITE_DCHECK_EQ(left_padding_copy[0], 0);
  TFLITE_DCHECK_EQ(left_padding_copy[3], 0);
  TFLITE_DCHECK_EQ(right_padding_copy[0], 0);
  TFLITE_DCHECK_EQ(right_padding_copy[3], 0);
  const int batch = MatchingDim(ext_input_shape, 0, ext_output_shape, 0);
  const int output_height = ext_output_shape.Dims(1);
  const int output_width = ext_output_shape.Dims(2);
  const int input_height = ext_input_shape.Dims(1);
  const int input_width = ext_input_shape.Dims(2);
  const int depth = MatchingDim(ext_input_shape, 3, ext_output_shape, 3);
  const int left_h_padding = left_padding_copy[1];
  const int left_w_padding = left_padding_copy[2];
  const int right_h_padding = right_padding_copy[1];
  const int right_w_padding = right_padding_copy[2];
  TFLITE_DCHECK_EQ(output_height,
                   input_height + left_h_padding + right_h_padding);
  TFLITE_DCHECK_EQ(output_width,
                   input_width + left_w_padding + right_w_padding);
  const T pad_value = *pad_value_ptr;
  // Precompute sizes (elements) and byte counts for each padding region.
  const int top_block_size = left_h_padding * output_width * depth;
  const size_t num_top_block_bytes = top_block_size * sizeof(T);
  const int bottom_block_size = right_h_padding * output_width * depth;
  const size_t num_bottom_block_bytes = bottom_block_size * sizeof(T);
  const int left_blocks_size = left_w_padding * depth;
  const size_t num_left_block_bytes = left_blocks_size * sizeof(T);
  const int right_blocks_size = right_w_padding * depth;
  const size_t num_right_block_bytes = right_blocks_size * sizeof(T);
  const int inner_line_size = input_width * depth;
  const size_t num_inner_line_bytes = inner_line_size * sizeof(T);
  if (input_height == 0) {
    // Degenerate input: the output is padding only.
    memset(output_data, pad_value,
           num_top_block_bytes + num_bottom_block_bytes);
  } else {
    for (int i = 0; i < batch; ++i) {
      // For each image in the batch, apply the top padding, then iterate
      // through rows, then apply the bottom padding.
      //
      // By unwinding one iteration, we can combine the first left-margin
      // padding with the top padding, and the last right-margin padding with
      // the bottom padding.
      memset(output_data, pad_value,
             num_top_block_bytes + num_left_block_bytes);
      output_data += top_block_size + left_blocks_size;
      memcpy(output_data, input_data, num_inner_line_bytes);
      input_data += inner_line_size;
      output_data += inner_line_size;
      // One iteration unwound.
      // Unwinding this loop affords the opportunity to reorder the loop work
      // and hence combine memset() calls.
      //
      // Before unwinding:
      //   for (int j = 0; j < input_height; ++j) {
      //     // Pad on left, copy central data, pad on right.
      //     memset(output_data, pad_value, num_left_block_bytes);
      //     output_data += left_blocks_size;
      //     memcpy(output_data, input_data, num_inner_line_bytes);
      //     input_data += inner_line_size;
      //     output_data += inner_line_size;
      //     memset(output_data, pad_value, num_right_block_bytes);
      //     output_data += right_blocks_size;
      //   }
      for (int j = 1; j < input_height; ++j) {
        memset(output_data, pad_value,
               num_right_block_bytes + num_left_block_bytes);
        output_data += right_blocks_size + left_blocks_size;
        memcpy(output_data, input_data, num_inner_line_bytes);
        input_data += inner_line_size;
        output_data += inner_line_size;
      }
      memset(output_data, pad_value,
             num_right_block_bytes + num_bottom_block_bytes);
      output_data += right_blocks_size + bottom_block_size;
    }
  }
}
// Generic fallback: non-uint8/float element types take the reference path.
template <typename T, typename P>
inline void PadImageStyle(const tflite::PadParams& op_params,
                          const RuntimeShape& input_shape, const T* input_data,
                          const P* pad_value_ptr,
                          const RuntimeShape& output_shape, T* output_data) {
  reference_ops::PadImageStyle(op_params, input_shape, input_data,
                               pad_value_ptr, output_shape, output_data);
}
// uint8 elements: memset reproduces any fill byte exactly, so the
// memset-based implementation is always applicable.
template <typename P>
inline void PadImageStyle(const tflite::PadParams& op_params,
                          const RuntimeShape& input_shape,
                          const uint8* input_data, const P* pad_value_ptr,
                          const RuntimeShape& output_shape,
                          uint8* output_data) {
  PadImageStyleMemset(op_params, input_shape, input_data, pad_value_ptr,
                      output_shape, output_data);
}
template <typename P>
inline void PadImageStyle(const tflite::PadParams& op_params,
                          const RuntimeShape& input_shape,
                          const float* input_data, const P* pad_value_ptr,
                          const RuntimeShape& output_shape,
                          float* output_data) {
  // The memset-based fast path is only byte-exact for a 0.0f fill; anything
  // else takes the generic element-wise PadImpl.
  const float pad_value_f = static_cast<float>(*pad_value_ptr);
  if (pad_value_f == 0.0f) {
    PadImageStyleMemset(op_params, input_shape, input_data, pad_value_ptr,
                        output_shape, output_data);
  } else {
    PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
            output_data);
  }
}
// Copies the sub-region of `input_shape` selected by op_params (begin/size
// vectors, front-padded to rank 5) through a SequentialTensorWriter.
template <typename T>
inline void Slice(const tflite::SliceParams& op_params,
                  const RuntimeShape& input_shape,
                  const RuntimeShape& output_shape,
                  SequentialTensorWriter<T>* writer) {
  ruy::profiler::ScopeLabel label("Slice");
  const RuntimeShape ext_shape = RuntimeShape::ExtendedShape(5, input_shape);
  TFLITE_DCHECK_LE(op_params.begin_count, 5);
  TFLITE_DCHECK_LE(op_params.size_count, 5);
  const int begin_count = op_params.begin_count;
  const int size_count = op_params.size_count;
  // Front-pad begin/size to rank 5: missing leading dims start at 0 and
  // span the full extent, and a size of -1 means "to the end of the dim".
  std::array<int, 5> start;
  std::array<int, 5> stop;
  for (int i = 0; i < 5; ++i) {
    const int padded_i = 5 - i;
    if (begin_count < padded_i) {
      start[i] = 0;
    } else {
      start[i] = op_params.begin[begin_count - padded_i];
    }
    if (size_count < padded_i || op_params.size[size_count - padded_i] == -1) {
      stop[i] = ext_shape.Dims(i);
    } else {
      stop[i] = start[i] + op_params.size[size_count - padded_i];
    }
  }
  // The innermost dimension is contiguous, so emit it as one run per offset.
  for (int i0 = start[0]; i0 < stop[0]; ++i0) {
    for (int i1 = start[1]; i1 < stop[1]; ++i1) {
      for (int i2 = start[2]; i2 < stop[2]; ++i2) {
        for (int i3 = start[3]; i3 < stop[3]; ++i3) {
          const int len = stop[4] - start[4];
          if (len > 0) {
            writer->WriteN(Offset(ext_shape, i0, i1, i2, i3, start[4]), len);
          }
        }
      }
    }
  }
}
// Raw-pointer convenience overload: wraps the buffers in a
// SequentialTensorWriter and forwards to the writer-based Slice.
template <typename T>
inline void Slice(const tflite::SliceParams& op_params,
                  const RuntimeShape& input_shape, const T* input_data,
                  const RuntimeShape& output_shape, T* output_data) {
  SequentialTensorWriter<T> writer(input_data, output_data);
  Slice(op_params, input_shape, output_shape, &writer);
}
// TfLiteTensor convenience overload: wraps the tensors in a
// SequentialTensorWriter and forwards to the writer-based Slice.
template <typename T>
inline void Slice(const tflite::SliceParams& op_params,
                  const RuntimeShape& input_shape, const TfLiteTensor* input,
                  const RuntimeShape& output_shape, TfLiteTensor* output) {
  SequentialTensorWriter<T> writer(input, output);
  Slice(op_params, input_shape, output_shape, &writer);
}
// Note: This implementation is only optimized for the case where the inner
// stride == 1.
//
// Writes the strided slice of the (up to 5D) input through `writer`.
// Start/stop indices per axis are resolved from the begin/end masks by the
// strided_slice helpers; strides may be negative, which LoopCondition
// accounts for when deciding loop termination.
template <typename T>
inline void StridedSlice(const tflite::StridedSliceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const RuntimeShape& unextended_output_shape,
                         SequentialTensorWriter<T>* writer) {
  using strided_slice::LoopCondition;
  using strided_slice::StartForAxis;
  using strided_slice::StopForAxis;
  ruy::profiler::ScopeLabel label("StridedSlice");
  // Note that the output_shape is not used herein.
  tflite::StridedSliceParams params_copy = op_params;
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 5);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(5, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(5, unextended_output_shape);
  // Reverse and pad to 5 dimensions because that is what the runtime code
  // requires (ie. all shapes must be 5D and are given backwards).
  strided_slice::StridedSlicePadIndices(&params_copy, 5);
  // Resolve the concrete [start, stop) range for each of the 5 axes.
  const int start_0 = StartForAxis(params_copy, input_shape, 0);
  const int stop_0 = StopForAxis(params_copy, input_shape, 0, start_0);
  const int start_1 = StartForAxis(params_copy, input_shape, 1);
  const int stop_1 = StopForAxis(params_copy, input_shape, 1, start_1);
  const int start_2 = StartForAxis(params_copy, input_shape, 2);
  const int stop_2 = StopForAxis(params_copy, input_shape, 2, start_2);
  const int start_3 = StartForAxis(params_copy, input_shape, 3);
  const int stop_3 = StopForAxis(params_copy, input_shape, 3, start_3);
  const int start_4 = StartForAxis(params_copy, input_shape, 4);
  const int stop_4 = StopForAxis(params_copy, input_shape, 4, start_4);
  // With an inner stride of 1 the innermost axis is a contiguous run that
  // can be written in bulk via WriteN.
  const bool inner_stride_is_1 = params_copy.strides[4] == 1;
  // The loop nest folds the flat input offset incrementally: at depth k,
  // offset_k already carries the axes 0..k contribution scaled by the
  // product of the remaining dims, so the innermost offset is a direct
  // element index into the input buffer.
  for (int offset_0 = start_0 * input_shape.Dims(1),
           end_0 = stop_0 * input_shape.Dims(1),
           step_0 = params_copy.strides[0] * input_shape.Dims(1);
       !LoopCondition(offset_0, end_0, params_copy.strides[0]);
       offset_0 += step_0) {
    for (int offset_1 = (offset_0 + start_1) * input_shape.Dims(2),
             end_1 = (offset_0 + stop_1) * input_shape.Dims(2),
             step_1 = params_copy.strides[1] * input_shape.Dims(2);
         !LoopCondition(offset_1, end_1, params_copy.strides[1]);
         offset_1 += step_1) {
      for (int offset_2 = (offset_1 + start_2) * input_shape.Dims(3),
               end_2 = (offset_1 + stop_2) * input_shape.Dims(3),
               step_2 = params_copy.strides[2] * input_shape.Dims(3);
           !LoopCondition(offset_2, end_2, params_copy.strides[2]);
           offset_2 += step_2) {
        for (int offset_3 = (offset_2 + start_3) * input_shape.Dims(4),
                 end_3 = (offset_2 + stop_3) * input_shape.Dims(4),
                 step_3 = params_copy.strides[3] * input_shape.Dims(4);
             !LoopCondition(offset_3, end_3, params_copy.strides[3]);
             offset_3 += step_3) {
          // When the stride is 1, the inner loop is equivalent to the
          // optimized slice inner loop. Otherwise, it is identical to the
          // strided_slice reference implementation inner loop.
          if (inner_stride_is_1) {
            const int len = stop_4 - start_4;
            if (len > 0) {
              writer->WriteN(offset_3 + start_4, len);
            }
          } else {
            for (int offset_4 = offset_3 + start_4, end_4 = offset_3 + stop_4;
                 !LoopCondition(offset_4, end_4, params_copy.strides[4]);
                 offset_4 += params_copy.strides[4]) {
              writer->Write(offset_4);
            }
          }
        }
      }
    }
  }
}
// Raw-pointer convenience overload: adapts the buffer interface to the
// SequentialTensorWriter one and forwards to the writer-based StridedSlice.
template <typename T>
inline void StridedSlice(const tflite::StridedSliceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  SequentialTensorWriter<T> buffer_writer(input_data, output_data);
  StridedSlice<T>(op_params, unextended_input_shape, unextended_output_shape,
                  &buffer_writer);
}
// TfLiteTensor convenience overload: adapts the tensor interface to the
// SequentialTensorWriter one and forwards to the writer-based StridedSlice.
template <typename T>
inline void StridedSlice(const tflite::StridedSliceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const TfLiteTensor* input,
                         const RuntimeShape& unextended_output_shape,
                         TfLiteTensor* output) {
  SequentialTensorWriter<T> tensor_writer(input, output);
  StridedSlice<T>(op_params, unextended_input_shape, unextended_output_shape,
                  &tensor_writer);
}
// Elementwise minimum of input1 against a scalar threshold. Only
// input2_data[0] is read; the second operand is treated as a scalar.
template <typename T>
void Minimum(const RuntimeShape& input1_shape, const T* input1_data,
             const T* input2_data, const RuntimeShape& output_shape,
             T* output_data) {
  ruy::profiler::ScopeLabel label("TensorFlowMinimum");
  const auto cap = input2_data[0];
  auto in_map = MapAsVector(input1_data, input1_shape);
  auto out_map = MapAsVector(output_data, output_shape);
  out_map.array() = in_map.array().min(cap);
}
// Convenience version that allows, for example, generated-code calls to be
// the same as other binary ops. The second input's shape is ignored: the
// scalar variant above only reads input2_data[0].
template <typename T>
inline void Minimum(const RuntimeShape& input1_shape, const T* input1_data,
                    const RuntimeShape&, const T* input2_data,
                    const RuntimeShape& output_shape, T* output_data) {
  Minimum(input1_shape, input1_data, input2_data, output_shape, output_data);
}
// Elementwise maximum of input1 against a scalar floor. Only
// input2_data[0] is read; the second operand is treated as a scalar.
template <typename T>
void Maximum(const RuntimeShape& input1_shape, const T* input1_data,
             const T* input2_data, const RuntimeShape& output_shape,
             T* output_data) {
  ruy::profiler::ScopeLabel label("TensorFlowMaximum");
  const auto floor_value = input2_data[0];
  auto in_map = MapAsVector(input1_data, input1_shape);
  auto out_map = MapAsVector(output_data, output_shape);
  out_map.array() = in_map.array().max(floor_value);
}
// Convenience version that allows, for example, generated-code calls to be
// the same as other binary ops. The second input's shape is ignored: the
// scalar variant above only reads input2_data[0].
template <typename T>
inline void Maximum(const RuntimeShape& input1_shape, const T* input1_data,
                    const RuntimeShape&, const T* input2_data,
                    const RuntimeShape& output_shape, T* output_data) {
  Maximum(input1_shape, input1_data, input2_data, output_shape, output_data);
}
// Builds the im2col matrix for transpose convolution by *scattering*: it
// iterates over input pixels and copies each one into every im2col row/col
// position its filter window touches, instead of gathering patches per
// output pixel. Positions no input pixel reaches keep the zero fill.
// `zero_byte` is the byte used for that fill. NOTE(review): memset repeats
// a single byte, so for multi-byte T this is only exact when the repeated
// byte encodes the zero value — presumably callers pass 0 for float/int32;
// confirm at call sites.
template <typename T>
void TransposeIm2col(const ConvParams& params, uint8 zero_byte,
                     const RuntimeShape& input_shape, const T* input_data,
                     const RuntimeShape& filter_shape,
                     const RuntimeShape& output_shape, T* im2col_data) {
  ruy::profiler::ScopeLabel label("TransposeIm2col");
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK(im2col_data);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  MatchingDim(output_shape, 3, filter_shape, 0);  // output_depth
  // Construct the MxN sized im2col matrix.
  // The rows M, are sub-ordered B x H x W
  const RuntimeShape row_shape({1, batches, output_height, output_width});
  // The columns, N, are sub-ordered Kh x Kw x Din
  const RuntimeShape col_shape({1, filter_height, filter_width, input_depth});
  // Use dimensions M and N to construct dims for indexing directly into im2col
  const RuntimeShape im2col_shape(
      {1, 1, row_shape.FlatSize(), col_shape.FlatSize()});
  // Build the im2col matrix by looping through all the input pixels,
  // computing their influence on the output, rather than looping through all
  // the output pixels. We therefore must initialize the im2col array to zero.
  // This is potentially inefficient because we subsequently overwrite bytes
  // set here. However, in practice memset is very fast and costs negligible.
  memset(im2col_data, zero_byte, im2col_shape.FlatSize() * sizeof(T));
  // Loop through the output batches
  for (int batch = 0; batch < batches; ++batch) {
    // Loop through input pixels one at a time.
    for (int in_y = 0; in_y < input_height; ++in_y) {
      for (int in_x = 0; in_x < input_width; ++in_x) {
        // Loop through the output pixels it will influence
        // (transpose-conv mapping: input pixel (in_x, in_y) lands at
        // stride * position minus padding in the output).
        const int out_x_origin = (in_x * stride_width) - pad_width;
        const int out_y_origin = (in_y * stride_height) - pad_height;
        for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
          const int out_y = out_y_origin + filter_y;
          // Is output pixel within height bounds?
          if ((out_y >= 0) && (out_y < output_height)) {
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              const int out_x = out_x_origin + filter_x;
              // Is output pixel within width bounds?
              if ((out_x >= 0) && (out_x < output_width)) {
                // Copy the input elements of this pixel
                T const* src =
                    input_data + Offset(input_shape, batch, in_y, in_x, 0);
                // Row = which output pixel; col = which filter tap.
                int row_offset = Offset(row_shape, 0, batch, out_y, out_x);
                int col_offset = Offset(col_shape, 0, filter_y, filter_x, 0);
                T* dst = im2col_data +
                         Offset(im2col_shape, 0, 0, row_offset, col_offset);
                memcpy(dst, src, input_depth * sizeof(T));
              }
            }
          }
        }
      }
    }
  }
}
// Returns in 'im_data' (assumes to be zero-initialized) image patch in storage
// order (height, width, depth), constructed from patches in 'col_data', which
// is required to be in storage order (out_height * out_width, filter_height,
// filter_width, in_depth). Implementation by Yangqing Jia (jiayq).
// Copied from //tensorflow/core/kernels/conv_grad_input_ops.cc
//
// Values are ACCUMULATED (+=) into im_data, since overlapping filter
// windows contribute to the same image pixel — hence the zero-init
// requirement above. col_data is consumed strictly sequentially.
template <typename T>
void Col2im(const T* col_data, const int depth, const int height,
            const int width, const int filter_h, const int filter_w,
            const int pad_t, const int pad_l, const int pad_b, const int pad_r,
            const int stride_h, const int stride_w, T* im_data) {
  ruy::profiler::ScopeLabel label("Col2im");
  // Number of patch positions along each output axis.
  int height_col = (height + pad_t + pad_b - filter_h) / stride_h + 1;
  int width_col = (width + pad_l + pad_r - filter_w) / stride_w + 1;
  // (h_pad, w_pad) is the top-left corner of the current patch in image
  // coordinates; it can be negative while inside the padding region.
  int h_pad = -pad_t;
  for (int h = 0; h < height_col; ++h) {
    int w_pad = -pad_l;
    for (int w = 0; w < width_col; ++w) {
      T* im_patch_data = im_data + (h_pad * width + w_pad) * depth;
      for (int ih = h_pad; ih < h_pad + filter_h; ++ih) {
        for (int iw = w_pad; iw < w_pad + filter_w; ++iw) {
          // Only accumulate taps that land inside the image; padded taps
          // are skipped but col_data still advances.
          if (ih >= 0 && ih < height && iw >= 0 && iw < width) {
            // TODO(andydavis) Vectorize this loop (if compiler does not).
            for (int i = 0; i < depth; ++i) {
              im_patch_data[i] += col_data[i];
            }
          }
          im_patch_data += depth;
          col_data += depth;
        }
        // Jump over remaining number of depth.
        im_patch_data += depth * (width - filter_w);
      }
      w_pad += stride_w;
    }
    h_pad += stride_h;
  }
}
// Adds a per-channel bias vector, in place, to an activation tensor laid
// out as (batch, height, width, depth). A null bias_data means "no bias"
// and leaves im_data untouched.
template <typename T>
void BiasAdd(T* im_data, const T* bias_data, const int batch_size,
             const int height, const int width, const int depth) {
  if (bias_data == nullptr) return;
  // Every (b, h, w) position gets the same depth-long bias vector, so the
  // three outer loops collapse into a single walk over all pixels.
  const int num_pixels = batch_size * height * width;
  for (int pixel = 0; pixel < num_pixels; ++pixel) {
    for (int d = 0; d < depth; ++d) {
      im_data[d] += bias_data[d];
    }
    im_data += depth;
  }
}
// TransposeConvV2 expect the weights in HWOI order.
//
// Float transpose convolution, implemented per batch as:
//   1. One GEMM: filter (Kh*Kw*Dout x Din, row-major) x input pixels
//      (Din x H*W, col-major) -> per-pixel filter patches in col2im_data.
//   2. Col2im scatters/accumulates those patches into the output, which is
//      zero-filled up front because Col2im accumulates.
//   3. BiasAdd applies the per-channel bias over the whole batch at the end.
inline void TransposeConvV2(
    const ConvParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& hwoi_ordered_filter_shape,
    const float* hwoi_ordered_filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* const output_data, const RuntimeShape& col2im_shape,
    float* col2im_data, CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("TransposeConvV2/float");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(hwoi_ordered_filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK(col2im_data);
  TFLITE_DCHECK(hwoi_ordered_filter_data);
  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_image_size = input_shape.Dims(1) * input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_image_size = output_height * output_width;
  const int input_depth =
      MatchingDim(input_shape, 3, hwoi_ordered_filter_shape, 3);
  const int output_depth =
      MatchingDim(output_shape, 3, hwoi_ordered_filter_shape, 2);
  // Per-batch element strides for the input and output buffers.
  const int input_offset = input_image_size * input_depth;
  const int output_offset = output_image_size * output_depth;
  const int filter_height = hwoi_ordered_filter_shape.Dims(0);
  const int filter_width = hwoi_ordered_filter_shape.Dims(1);
  const int padding_top = params.padding_values.height;
  const int padding_bottom =
      params.padding_values.height + params.padding_values.height_offset;
  const int padding_left = params.padding_values.width;
  const int padding_right =
      params.padding_values.width + params.padding_values.width_offset;
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  const int hwoi_ordered_filter_total_size =
      filter_height * filter_width * output_depth;
  cpu_backend_gemm::MatrixParams<float> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = hwoi_ordered_filter_total_size;
  lhs_params.cols = input_depth;
  float* output_data_p = output_data;
  // Col2im accumulates, so the output must start at zero.
  std::fill_n(output_data, output_offset * batch_size, 0.0f);
  for (int i = 0; i < batch_size; ++i) {
    cpu_backend_gemm::MatrixParams<float> rhs_params;
    rhs_params.order = cpu_backend_gemm::Order::kColMajor;
    rhs_params.rows = input_depth;
    rhs_params.cols = input_image_size;
    cpu_backend_gemm::MatrixParams<float> dst_params;
    dst_params.order = cpu_backend_gemm::Order::kColMajor;
    dst_params.rows = hwoi_ordered_filter_total_size;
    dst_params.cols = input_image_size;
    cpu_backend_gemm::GemmParams<float, float> gemm_params;
    cpu_backend_gemm::Gemm(lhs_params, hwoi_ordered_filter_data, rhs_params,
                           input_data + input_offset * i, dst_params,
                           col2im_data, gemm_params, cpu_backend_context);
    Col2im(col2im_data, output_depth, output_height, output_width,
           filter_height, filter_width, padding_top, padding_left,
           padding_bottom, padding_right, stride_height, stride_width,
           output_data_p);
    output_data_p += output_offset;
  }
  // Bias is applied once over all batches, after the scatter phase.
  output_data_p = output_data;
  BiasAdd(output_data_p, bias_data, batch_size, output_height, output_width,
          output_depth);
}
// Requantizes int32 accumulators in `scratch` down to uint8 `output`:
//   output[i] = clamp(MultiplyByQuantizedMultiplier(scratch[i], multiplier,
//               shift) + output_zp, 0, 255)
// where (multiplier, shift) is the fixed-point representation of the output
// scale. The NEON path processes 16 values per iteration; the scalar loop
// below it handles the tail (or everything, without NEON) and must keep the
// same rounding behavior.
inline void Quantize(int32_t multiplier, int32_t shift, int32_t total_size,
                     int32_t output_zp, int32_t* scratch, uint8_t* output) {
  ruy::profiler::ScopeLabel label("Quantize/uint8");
  int i = 0;
  const int32_t output_min = std::numeric_limits<uint8_t>::min();
  const int32_t output_max = std::numeric_limits<uint8_t>::max();
#ifdef USE_NEON
  const int32x4_t output_zp_dup = vdupq_n_s32(output_zp);
  const int32x4_t max_val_dup = vdupq_n_s32(output_max);
  const int32x4_t min_val_dup = vdupq_n_s32(output_min);
  using gemmlowp::RoundingDivideByPOT;
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  for (; i <= total_size - 16; i += 16) {
    int32x4x4_t scratch_val;
    scratch_val.val[0] = vld1q_s32(scratch + i);
    scratch_val.val[1] = vld1q_s32(scratch + i + 4);
    scratch_val.val[2] = vld1q_s32(scratch + i + 8);
    scratch_val.val[3] = vld1q_s32(scratch + i + 12);
    // Scale, add zero point, then clamp to [0, 255] while still in int32.
    int32x4x4_t temp_val =
        MultiplyByQuantizedMultiplier4Rows(scratch_val, multiplier, shift);
    temp_val.val[0] = vaddq_s32(temp_val.val[0], output_zp_dup);
    temp_val.val[1] = vaddq_s32(temp_val.val[1], output_zp_dup);
    temp_val.val[2] = vaddq_s32(temp_val.val[2], output_zp_dup);
    temp_val.val[3] = vaddq_s32(temp_val.val[3], output_zp_dup);
    temp_val.val[0] =
        vmaxq_s32(vminq_s32(temp_val.val[0], max_val_dup), min_val_dup);
    temp_val.val[1] =
        vmaxq_s32(vminq_s32(temp_val.val[1], max_val_dup), min_val_dup);
    temp_val.val[2] =
        vmaxq_s32(vminq_s32(temp_val.val[2], max_val_dup), min_val_dup);
    temp_val.val[3] =
        vmaxq_s32(vminq_s32(temp_val.val[3], max_val_dup), min_val_dup);
    // Values are already in [0, 255], so the saturating narrows
    // s32 -> u16 -> u8 cannot change them; they just repack 16 lanes.
    const uint16x8_t result_1 =
        vcombine_u16(vqmovn_u32(vreinterpretq_u32_s32(temp_val.val[0])),
                     vqmovn_u32(vreinterpretq_u32_s32(temp_val.val[1])));
    const uint16x8_t result_2 =
        vcombine_u16(vqmovn_u32(vreinterpretq_u32_s32(temp_val.val[2])),
                     vqmovn_u32(vreinterpretq_u32_s32(temp_val.val[3])));
    const uint8x16_t result =
        vcombine_u8(vqmovn_u16(result_1), vqmovn_u16(result_2));
    vst1q_u8(output + i, result);
  }
#endif
  // Scalar tail (or full loop when NEON is unavailable).
  for (; i < total_size; ++i) {
    int32_t temp = MultiplyByQuantizedMultiplier(scratch[i], multiplier, shift);
    temp += output_zp;
    if (temp > output_max) {
      temp = output_max;
    }
    if (temp < output_min) {
      temp = output_min;
    }
    output[i] = static_cast<uint8_t>(temp);
  }
}
// Per-channel requantization of int32 accumulators to int8:
//   output[r][c] = clamp(MultiplyByQuantizedMultiplier(scratch[r][c],
//                  multiplier[c], shift[c]) + output_zp,
//                  output_min, output_max)
// `multiplier`/`shift` have one entry per output channel (channel_size of
// them); `total_size` must be a whole number of rows of channel_size.
inline void Quantize(const int32_t* multiplier, const int32_t* shift,
                     int32_t channel_size, int32_t total_size,
                     int32_t output_zp, int32_t output_min, int32_t output_max,
                     int32_t* scratch, int8_t* output) {
  ruy::profiler::ScopeLabel label("Quantize/int8");
  // Here we're trying to quantize the raw accumulators:
  //        output_channels
  //       data data data data data
  // rows  data data data data data
  //       data data data data data
  //          ....
  //
  // In order to minimize the reload of the multipliers & shifts, once we load
  // the multipliers & shifts, we load & quantize the raw accumulators for every
  // row.
#ifdef USE_NEON
  const int32x4_t output_offset_vec = vdupq_n_s32(output_zp);
  const int32x4_t output_activation_min_vec = vdupq_n_s32(output_min);
  const int32x4_t output_activation_max_vec = vdupq_n_s32(output_max);
  const int32x4_t zeros = vdupq_n_s32(0);
#endif
  TFLITE_DCHECK_EQ(total_size % channel_size, 0);
  const int32_t rows = total_size / channel_size;
  int c = 0;
#ifdef USE_NEON
  using gemmlowp::RoundingDivideByPOT;
  // Process 8 channels at a time; the channel loop is outermost so the
  // per-channel multiplier/shift vectors are loaded once per 8 channels.
  for (; c <= channel_size - 8; c += 8) {
    // Split each shift into a left part (shift > 0) and a right part
    // (shift < 0). vshlq/vrshlq shift left for positive lane values and
    // right for negative ones, so each value passes through both with at
    // most one of them being a no-op.
    int32x4_t out_shift_1 = vld1q_s32(shift + c);
    int32x4_t out_shift_2 = vld1q_s32(shift + c + 4);
    int32x4_t left_shift_1 = vmaxq_s32(out_shift_1, zeros);
    int32x4_t left_shift_2 = vmaxq_s32(out_shift_2, zeros);
    // Right shift will be performed as left shift with negative values.
    int32x4_t right_shift_1 = vminq_s32(out_shift_1, zeros);
    int32x4_t right_shift_2 = vminq_s32(out_shift_2, zeros);
    int32x4_t out_mul_1 = vld1q_s32(multiplier + c);
    int32x4_t out_mul_2 = vld1q_s32(multiplier + c + 4);
    for (int n = 0; n < rows; ++n) {
      int loc = n * channel_size + c;
      int32x4_t acc_1 = vld1q_s32(scratch + loc);
      int32x4_t acc_2 = vld1q_s32(scratch + loc + 4);
      // Saturating Rounding Doubling High Mul.
      acc_1 = vshlq_s32(acc_1, left_shift_1);
      acc_1 = vqrdmulhq_s32(acc_1, out_mul_1);
      acc_2 = vshlq_s32(acc_2, left_shift_2);
      acc_2 = vqrdmulhq_s32(acc_2, out_mul_2);
      // Rounding Dividing By POT.
      acc_1 = vrshlq_s32(acc_1, right_shift_1);
      acc_2 = vrshlq_s32(acc_2, right_shift_2);
      // Add the output offset.
      acc_1 = vaddq_s32(acc_1, output_offset_vec);
      acc_2 = vaddq_s32(acc_2, output_offset_vec);
      // Apply the activation function.
      acc_1 = vmaxq_s32(acc_1, output_activation_min_vec);
      acc_1 = vminq_s32(acc_1, output_activation_max_vec);
      acc_2 = vmaxq_s32(acc_2, output_activation_min_vec);
      acc_2 = vminq_s32(acc_2, output_activation_max_vec);
      // Saturating cast to int8 and store to destination.
      const int16x4_t acc_s16_1 = vqmovn_s32(acc_1);
      const int16x4_t acc_s16_2 = vqmovn_s32(acc_2);
      const int16x8_t res_s16 = vcombine_s16(acc_s16_1, acc_s16_2);
      const int8x8_t res_s8 = vqmovn_s16(res_s16);
      vst1_s8(output + loc, res_s8);
    }
  }
#endif  // USE_NEON
  // Handle leftover values, one by one. This is very slow.
  for (; c < channel_size; c++) {
    for (int n = 0; n < rows; ++n) {
      int loc = n * channel_size + c;
      int32 acc = scratch[loc];
      acc = MultiplyByQuantizedMultiplier(acc, multiplier[c], shift[c]);
      acc += output_zp;
      acc = std::max(acc, output_min);
      acc = std::min(acc, output_max);
      output[loc] = static_cast<int8>(acc);
    }
  }
}
// TransposeConvV2 expect the weights in HWOI order.
//
// uint8 transpose convolution. Same GEMM + Col2im structure as the float
// overload above, except accumulation happens in the int32 `scratch_data`
// buffer and a final Quantize step scales it down to uint8 output.
// NOTE(review): the gemm zero_points are set to the negated params offsets
// (-weights_offset, -input_offset) — presumably the params store offsets
// already negated relative to the zero points; confirm against callers.
inline void TransposeConvV2(
    const ConvParams& params, const RuntimeShape& input_shape,
    const uint8_t* input_data, const RuntimeShape& hwoi_ordered_filter_shape,
    const uint8_t* hwoi_ordered_filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8_t* output_data, const RuntimeShape& col2im_shape,
    int32_t* col2im_data, int32_t* scratch_data,
    CpuBackendContext* cpu_backend_context) {
  ruy::profiler::ScopeLabel label("TransposeConvV2/uint8");
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(hwoi_ordered_filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK(col2im_data);
  TFLITE_DCHECK(hwoi_ordered_filter_data);
  const int batch_size = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_image_size = input_shape.Dims(1) * input_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_image_size = output_height * output_width;
  const int input_depth =
      MatchingDim(input_shape, 3, hwoi_ordered_filter_shape, 3);
  const int output_depth =
      MatchingDim(output_shape, 3, hwoi_ordered_filter_shape, 2);
  // Per-batch element strides for the input and scratch buffers.
  const int input_offset = input_image_size * input_depth;
  const int output_offset = output_image_size * output_depth;
  const int filter_height = hwoi_ordered_filter_shape.Dims(0);
  const int filter_width = hwoi_ordered_filter_shape.Dims(1);
  const int padding_top = params.padding_values.height;
  const int padding_bottom =
      params.padding_values.height + params.padding_values.height_offset;
  const int padding_left = params.padding_values.width;
  const int padding_right =
      params.padding_values.width + params.padding_values.width_offset;
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  const int hwoi_ordered_filter_total_size =
      filter_height * filter_width * output_depth;
  cpu_backend_gemm::MatrixParams<uint8_t> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = hwoi_ordered_filter_total_size;
  lhs_params.cols = input_depth;
  lhs_params.zero_point = -params.weights_offset;
  int32_t* scratch_data_p = scratch_data;
  // Col2im accumulates, so the int32 scratch must start at zero.
  std::fill_n(scratch_data, output_offset * batch_size, static_cast<int32>(0));
  for (int i = 0; i < batch_size; ++i) {
    cpu_backend_gemm::MatrixParams<uint8_t> rhs_params;
    rhs_params.order = cpu_backend_gemm::Order::kColMajor;
    rhs_params.rows = input_depth;
    rhs_params.cols = input_image_size;
    rhs_params.zero_point = -params.input_offset;
    cpu_backend_gemm::MatrixParams<int32_t> dst_params;
    dst_params.order = cpu_backend_gemm::Order::kColMajor;
    dst_params.rows = hwoi_ordered_filter_total_size;
    dst_params.cols = input_image_size;
    cpu_backend_gemm::GemmParams<int32_t, int32_t> gemm_params;
    cpu_backend_gemm::Gemm(lhs_params, hwoi_ordered_filter_data, rhs_params,
                           input_data + input_offset * i, dst_params,
                           col2im_data, gemm_params, cpu_backend_context);
    Col2im(col2im_data, output_depth, output_height, output_width,
           filter_height, filter_width, padding_top, padding_left,
           padding_bottom, padding_right, stride_height, stride_width,
           scratch_data_p);
    scratch_data_p += output_offset;
  }
  scratch_data_p = scratch_data;
  // Bias is added while still in int32, then the whole scratch buffer is
  // requantized down to the uint8 output domain in one pass.
  BiasAdd(scratch_data_p, bias_data, batch_size, output_height, output_width,
          output_depth);
  Quantize(params.output_multiplier, params.output_shift,
           output_shape.FlatSize(), params.output_offset, scratch_data,
           output_data);
}
// Integer-only version of ResizeNearestNeighbor. Since scales are represented
// in fixed-point and thus approximated, |in_x| or |in_y| may differ from the
// reference version. Debug checks are in place to test if this occurs.
// NOTE: If align_corners or half_pixel_centers is true, we use the reference
// version.
inline void ResizeNearestNeighbor(
    const tflite::ResizeNearestNeighborParams& op_params,
    const RuntimeShape& unextended_input_shape, const uint8* input_data,
    const RuntimeShape& output_size_shape, const int32* output_size_data,
    const RuntimeShape& unextended_output_shape, uint8* output_data) {
  if (op_params.align_corners || op_params.half_pixel_centers) {
    // TODO(b/149823713): Add support for align_corners & half_pixel_centers in
    // this kernel.
    reference_ops::ResizeNearestNeighbor(
        op_params, unextended_input_shape, input_data, output_size_shape,
        output_size_data, unextended_output_shape, output_data);
    return;
  }
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
  int32 input_height = input_shape.Dims(1);
  int32 input_width = input_shape.Dims(2);
  int32 depth = MatchingDim(input_shape, 3, output_shape, 3);
  // The Tensorflow version of this op allows resize on the width and height
  // axis only.
  TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2);
  int32 output_height = output_size_data[0];
  int32 output_width = output_size_data[1];
  // Convert scales to fixed-point with 16 fractional bits. We add 1 as an
  // error factor and to avoid zero scales. For example, with input_height = 1,
  // output_height = 3, the float scaling factor would be non-zero at 1/3.
  // With fixed-point, this is zero.
  int32 height_scale = (input_height << 16) / output_height + 1;
  int32 width_scale = (input_width << 16) / output_width + 1;
  // Element strides in the NHWC input buffer (one byte per uint8 element).
  const int col_offset = input_shape.Dims(3);
  const int row_offset = input_shape.Dims(2) * col_offset;
  const int batch_offset = input_shape.Dims(1) * row_offset;
  const uint8* input_ptr = input_data;
  uint8* output_ptr = output_data;
  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < output_height; ++y) {
      // Nearest source row: floor(y * scale) in 16.16 fixed point, clamped.
      int32 in_y = std::min((y * height_scale) >> 16, input_height - 1);
      // Check offset calculation is the same as the reference version. See
      // function comment for details. We check using a non-float version of:
      // TFLITE_DCHECK_EQ(in_y, std::floor(y * (static_cast<float>(input_height)
      //                                        / output_height)));
      TFLITE_DCHECK_LT(y * input_height, output_height + in_y * output_height);
      TFLITE_DCHECK_GE(y * input_height, in_y * output_height);
      const uint8* y_input_ptr = input_ptr + in_y * row_offset;
      for (int x = 0; x < output_width; ++x) {
        int32 in_x = std::min((x * width_scale) >> 16, input_width - 1);
        // Check offset calculation is the same as the reference version. See
        // function comment for details. We check using a non-float version of:
        // TFLITE_DCHECK_EQ(in_y,
        //                  std::floor(y * (static_cast<float>(input_width)
        //                                      / output_width)));
        TFLITE_DCHECK_LT(x * input_width, output_width + in_x * output_width);
        TFLITE_DCHECK_GE(x * input_width, in_x * output_width);
        const uint8* x_input_ptr = y_input_ptr + in_x * col_offset;
        // depth bytes == depth uint8 elements: copy one whole pixel.
        memcpy(output_ptr, x_input_ptr, depth);
        output_ptr += depth;
      }
    }
    input_ptr += batch_offset;
  }
}
// Fallback requantization: no optimized path exists for this particular
// (input_type, output_type) pair, so defer to the reference implementation.
// Specializations below provide NEON fast paths for specific pairs.
template <typename input_type, typename output_type>
inline void Requantize(const input_type* input_data, int32_t size,
                       int32_t effective_scale_multiplier,
                       int32_t effective_scale_shift, int32_t input_zeropoint,
                       int32_t output_zeropoint, output_type* output_data) {
  reference_ops::Requantize(input_data, size, effective_scale_multiplier,
                            effective_scale_shift, input_zeropoint,
                            output_zeropoint, output_data);
}
// int8 -> uint8 requantization:
//   output[i] = clamp(MultiplyByQuantizedMultiplier(
//                   input[i] - input_zeropoint,
//                   effective_scale_multiplier, effective_scale_shift)
//               + output_zeropoint, 0, 255)
// The NEON path widens 16 int8 lanes to int32, rescales, then narrows back
// with saturation; the scalar loop handles the tail identically.
template <>
inline void Requantize<int8_t, uint8_t>(const int8_t* input_data, int32_t size,
                                        int32_t effective_scale_multiplier,
                                        int32_t effective_scale_shift,
                                        int32_t input_zeropoint,
                                        int32_t output_zeropoint,
                                        uint8_t* output_data) {
  ruy::profiler::ScopeLabel label("Requantize/Int8ToUint8");
  static constexpr int32_t kMinOutput = std::numeric_limits<uint8_t>::min();
  static constexpr int32_t kMaxOutput = std::numeric_limits<uint8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Constants.
  // The input zero point is negated so it can be applied with an add.
  const int32x4_t input_zero_point_dup = vdupq_n_s32(-input_zeropoint);
  const int32x4_t output_zero_point_dup = vdupq_n_s32(output_zeropoint);
  const int32x4_t min_val_dup = vdupq_n_s32(kMinOutput);
  const int32x4_t max_val_dup = vdupq_n_s32(kMaxOutput);
  for (; i <= size - 16; i += 16) {
    // Widen 16 x int8 -> 4 vectors of 4 x int32 (sign-extending).
    const int8x16_t input_vec = vld1q_s8(input_data + i);
    const int16x8_t first_half = vmovl_s8(vget_low_s8(input_vec));
    const int16x8_t second_half = vmovl_s8(vget_high_s8(input_vec));
    int32x4x4_t input;
    input.val[0] = vmovl_s16(vget_low_s16(first_half));
    input.val[1] = vmovl_s16(vget_high_s16(first_half));
    input.val[2] = vmovl_s16(vget_low_s16(second_half));
    input.val[3] = vmovl_s16(vget_high_s16(second_half));
    input.val[0] = vaddq_s32(input.val[0], input_zero_point_dup);
    input.val[1] = vaddq_s32(input.val[1], input_zero_point_dup);
    input.val[2] = vaddq_s32(input.val[2], input_zero_point_dup);
    input.val[3] = vaddq_s32(input.val[3], input_zero_point_dup);
    // Rescale, re-center on the output zero point, and clamp to [0, 255]
    // while still in int32.
    int32x4x4_t result = MultiplyByQuantizedMultiplier4Rows(
        input, effective_scale_multiplier, effective_scale_shift);
    result.val[0] = vaddq_s32(result.val[0], output_zero_point_dup);
    result.val[1] = vaddq_s32(result.val[1], output_zero_point_dup);
    result.val[2] = vaddq_s32(result.val[2], output_zero_point_dup);
    result.val[3] = vaddq_s32(result.val[3], output_zero_point_dup);
    result.val[0] =
        vmaxq_s32(vminq_s32(result.val[0], max_val_dup), min_val_dup);
    result.val[1] =
        vmaxq_s32(vminq_s32(result.val[1], max_val_dup), min_val_dup);
    result.val[2] =
        vmaxq_s32(vminq_s32(result.val[2], max_val_dup), min_val_dup);
    result.val[3] =
        vmaxq_s32(vminq_s32(result.val[3], max_val_dup), min_val_dup);
    // Narrow back down: s32 (already in [0, 255]) -> u16 -> u8 with
    // saturating moves, then store all 16 lanes at once.
    const uint32x4_t result_val_1_unsigned =
        vreinterpretq_u32_s32(result.val[0]);
    const uint32x4_t result_val_2_unsigned =
        vreinterpretq_u32_s32(result.val[1]);
    const uint32x4_t result_val_3_unsigned =
        vreinterpretq_u32_s32(result.val[2]);
    const uint32x4_t result_val_4_unsigned =
        vreinterpretq_u32_s32(result.val[3]);
    const uint16x4_t narrowed_val_1 = vqmovn_u32(result_val_1_unsigned);
    const uint16x4_t narrowed_val_2 = vqmovn_u32(result_val_2_unsigned);
    const uint16x4_t narrowed_val_3 = vqmovn_u32(result_val_3_unsigned);
    const uint16x4_t narrowed_val_4 = vqmovn_u32(result_val_4_unsigned);
    const uint16x8_t output_first_half =
        vcombine_u16(narrowed_val_1, narrowed_val_2);
    const uint16x8_t output_second_half =
        vcombine_u16(narrowed_val_3, narrowed_val_4);
    const uint8x8_t narrowed_first_half = vqmovn_u16(output_first_half);
    const uint8x8_t narrowed_second_half = vqmovn_u16(output_second_half);
    const uint8x16_t narrowed_result =
        vcombine_u8(narrowed_first_half, narrowed_second_half);
    vst1q_u8(output_data + i, narrowed_result);
  }
#endif
  // Scalar tail (or full loop when NEON is unavailable).
  for (; i < size; ++i) {
    const int32_t input = input_data[i] - input_zeropoint;
    const int32_t output =
        MultiplyByQuantizedMultiplier(input, effective_scale_multiplier,
                                      effective_scale_shift) +
        output_zeropoint;
    const int32_t clamped_output =
        std::max(std::min(output, kMaxOutput), kMinOutput);
    output_data[i] = static_cast<uint8_t>(clamped_output);
  }
}
// Requantizes uint8 input to int8 output:
//   output = clamp(MultiplyByQuantizedMultiplier(input - input_zeropoint,
//                                                effective_scale_multiplier,
//                                                effective_scale_shift) +
//                  output_zeropoint, [int8 min, int8 max])
// The NEON path handles 16 elements per iteration; the scalar loop at the
// bottom covers the remainder (and everything when NEON is unavailable).
template <>
inline void Requantize<uint8_t, int8_t>(const uint8_t* input_data, int32_t size,
                                        int32_t effective_scale_multiplier,
                                        int32_t effective_scale_shift,
                                        int32_t input_zeropoint,
                                        int32_t output_zeropoint,
                                        int8_t* output_data) {
  ruy::profiler::ScopeLabel label("Requantize/Uint8ToInt8");
  static constexpr int32_t kMinOutput = std::numeric_limits<int8_t>::min();
  static constexpr int32_t kMaxOutput = std::numeric_limits<int8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Constants. The input zero point is negated so it can be applied with a
  // plain vector add below.
  const int32x4_t input_zero_point_dup = vdupq_n_s32(-input_zeropoint);
  const int32x4_t output_zero_point_dup = vdupq_n_s32(output_zeropoint);
  const int32x4_t min_val_dup = vdupq_n_s32(kMinOutput);
  const int32x4_t max_val_dup = vdupq_n_s32(kMaxOutput);
  for (; i <= size - 16; i += 16) {
    // Widen 16 uint8 values into four int32x4 registers.
    const uint8x16_t input_vec = vld1q_u8(input_data + i);
    const uint16x8_t first_half = vmovl_u8(vget_low_u8(input_vec));
    const uint16x8_t second_half = vmovl_u8(vget_high_u8(input_vec));
    int32x4x4_t input;
    input.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(first_half)));
    input.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(first_half)));
    input.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(second_half)));
    input.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(second_half)));
    // Subtract the input zero point.
    input.val[0] = vaddq_s32(input.val[0], input_zero_point_dup);
    input.val[1] = vaddq_s32(input.val[1], input_zero_point_dup);
    input.val[2] = vaddq_s32(input.val[2], input_zero_point_dup);
    input.val[3] = vaddq_s32(input.val[3], input_zero_point_dup);
    // Rescale to the output scale.
    int32x4x4_t result = MultiplyByQuantizedMultiplier4Rows(
        input, effective_scale_multiplier, effective_scale_shift);
    // Add the output zero point, then clamp to the int8 range.
    result.val[0] = vaddq_s32(result.val[0], output_zero_point_dup);
    result.val[1] = vaddq_s32(result.val[1], output_zero_point_dup);
    result.val[2] = vaddq_s32(result.val[2], output_zero_point_dup);
    result.val[3] = vaddq_s32(result.val[3], output_zero_point_dup);
    result.val[0] =
        vmaxq_s32(vminq_s32(result.val[0], max_val_dup), min_val_dup);
    result.val[1] =
        vmaxq_s32(vminq_s32(result.val[1], max_val_dup), min_val_dup);
    result.val[2] =
        vmaxq_s32(vminq_s32(result.val[2], max_val_dup), min_val_dup);
    result.val[3] =
        vmaxq_s32(vminq_s32(result.val[3], max_val_dup), min_val_dup);
    // Narrow back down to 16 int8 values and store.
    const int16x4_t narrowed_val_1 = vqmovn_s32(result.val[0]);
    const int16x4_t narrowed_val_2 = vqmovn_s32(result.val[1]);
    const int16x4_t narrowed_val_3 = vqmovn_s32(result.val[2]);
    const int16x4_t narrowed_val_4 = vqmovn_s32(result.val[3]);
    const int16x8_t output_first_half =
        vcombine_s16(narrowed_val_1, narrowed_val_2);
    const int16x8_t output_second_half =
        vcombine_s16(narrowed_val_3, narrowed_val_4);
    const int8x8_t narrowed_first_half = vqmovn_s16(output_first_half);
    const int8x8_t narrowed_second_half = vqmovn_s16(output_second_half);
    const int8x16_t narrowed_result =
        vcombine_s8(narrowed_first_half, narrowed_second_half);
    vst1q_s8(output_data + i, narrowed_result);
  }
#endif
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < size; ++i) {
    const int32_t input = input_data[i] - input_zeropoint;
    const int32_t output =
        MultiplyByQuantizedMultiplier(input, effective_scale_multiplier,
                                      effective_scale_shift) +
        output_zeropoint;
    const int32_t clamped_output =
        std::max(std::min(output, kMaxOutput), kMinOutput);
    output_data[i] = static_cast<int8_t>(clamped_output);
  }
}
// Requantizes int8 input to int8 output (zero-point and/or scale change):
//   output = clamp(MultiplyByQuantizedMultiplier(input - input_zeropoint,
//                                                effective_scale_multiplier,
//                                                effective_scale_shift) +
//                  output_zeropoint, [int8 min, int8 max])
// The NEON path handles 16 elements per iteration; the scalar loop at the
// bottom covers the remainder (and everything when NEON is unavailable).
template <>
inline void Requantize<int8_t, int8_t>(const int8_t* input_data, int32_t size,
                                       int32_t effective_scale_multiplier,
                                       int32_t effective_scale_shift,
                                       int32_t input_zeropoint,
                                       int32_t output_zeropoint,
                                       int8_t* output_data) {
  ruy::profiler::ScopeLabel label("Requantize/Int8ToInt8");
  static constexpr int32_t kMinOutput = std::numeric_limits<int8_t>::min();
  static constexpr int32_t kMaxOutput = std::numeric_limits<int8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Constants. The input zero point is negated so it can be applied with a
  // plain vector add below.
  const int32x4_t input_zero_point_dup = vdupq_n_s32(-input_zeropoint);
  const int32x4_t output_zero_point_dup = vdupq_n_s32(output_zeropoint);
  const int32x4_t min_val_dup = vdupq_n_s32(kMinOutput);
  const int32x4_t max_val_dup = vdupq_n_s32(kMaxOutput);
  for (; i <= size - 16; i += 16) {
    // Sign-extend 16 int8 values into four int32x4 registers.
    const int8x16_t input_vec = vld1q_s8(input_data + i);
    const int16x8_t first_half = vmovl_s8(vget_low_s8(input_vec));
    const int16x8_t second_half = vmovl_s8(vget_high_s8(input_vec));
    int32x4x4_t input;
    input.val[0] = vmovl_s16(vget_low_s16(first_half));
    input.val[1] = vmovl_s16(vget_high_s16(first_half));
    input.val[2] = vmovl_s16(vget_low_s16(second_half));
    input.val[3] = vmovl_s16(vget_high_s16(second_half));
    // Subtract the input zero point.
    input.val[0] = vaddq_s32(input.val[0], input_zero_point_dup);
    input.val[1] = vaddq_s32(input.val[1], input_zero_point_dup);
    input.val[2] = vaddq_s32(input.val[2], input_zero_point_dup);
    input.val[3] = vaddq_s32(input.val[3], input_zero_point_dup);
    // Rescale to the output scale.
    int32x4x4_t result = MultiplyByQuantizedMultiplier4Rows(
        input, effective_scale_multiplier, effective_scale_shift);
    // Add the output zero point, then clamp to the int8 range.
    result.val[0] = vaddq_s32(result.val[0], output_zero_point_dup);
    result.val[1] = vaddq_s32(result.val[1], output_zero_point_dup);
    result.val[2] = vaddq_s32(result.val[2], output_zero_point_dup);
    result.val[3] = vaddq_s32(result.val[3], output_zero_point_dup);
    result.val[0] =
        vmaxq_s32(vminq_s32(result.val[0], max_val_dup), min_val_dup);
    result.val[1] =
        vmaxq_s32(vminq_s32(result.val[1], max_val_dup), min_val_dup);
    result.val[2] =
        vmaxq_s32(vminq_s32(result.val[2], max_val_dup), min_val_dup);
    result.val[3] =
        vmaxq_s32(vminq_s32(result.val[3], max_val_dup), min_val_dup);
    // Narrow back down to 16 int8 values and store.
    const int16x4_t narrowed_val_1 = vqmovn_s32(result.val[0]);
    const int16x4_t narrowed_val_2 = vqmovn_s32(result.val[1]);
    const int16x4_t narrowed_val_3 = vqmovn_s32(result.val[2]);
    const int16x4_t narrowed_val_4 = vqmovn_s32(result.val[3]);
    const int16x8_t output_first_half =
        vcombine_s16(narrowed_val_1, narrowed_val_2);
    const int16x8_t output_second_half =
        vcombine_s16(narrowed_val_3, narrowed_val_4);
    const int8x8_t narrowed_first_half = vqmovn_s16(output_first_half);
    const int8x8_t narrowed_second_half = vqmovn_s16(output_second_half);
    const int8x16_t narrowed_result =
        vcombine_s8(narrowed_first_half, narrowed_second_half);
    vst1q_s8(output_data + i, narrowed_result);
  }
#endif
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < size; ++i) {
    const int32_t input = input_data[i] - input_zeropoint;
    const int32_t output =
        MultiplyByQuantizedMultiplier(input, effective_scale_multiplier,
                                      effective_scale_shift) +
        output_zeropoint;
    const int32_t clamped_output =
        std::max(std::min(output, kMaxOutput), kMinOutput);
    output_data[i] = static_cast<int8_t>(clamped_output);
  }
}
// Requantizes uint8 input to uint8 output (zero-point and/or scale change):
//   output = clamp(MultiplyByQuantizedMultiplier(input - input_zeropoint,
//                                                effective_scale_multiplier,
//                                                effective_scale_shift) +
//                  output_zeropoint, [0, 255])
// The NEON path handles 16 elements per iteration; the scalar loop at the
// bottom covers the remainder (and everything when NEON is unavailable).
template <>
inline void Requantize<uint8_t, uint8_t>(
    const uint8_t* input_data, int32_t size, int32_t effective_scale_multiplier,
    int32_t effective_scale_shift, int32_t input_zeropoint,
    int32_t output_zeropoint, uint8_t* output_data) {
  ruy::profiler::ScopeLabel label("Requantize/Uint8ToUint8");
  static constexpr int32_t kMinOutput = std::numeric_limits<uint8_t>::min();
  static constexpr int32_t kMaxOutput = std::numeric_limits<uint8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Constants. The input zero point is negated so it can be applied with a
  // plain vector add below.
  const int32x4_t input_zero_point_dup = vdupq_n_s32(-input_zeropoint);
  const int32x4_t output_zero_point_dup = vdupq_n_s32(output_zeropoint);
  const int32x4_t min_val_dup = vdupq_n_s32(kMinOutput);
  const int32x4_t max_val_dup = vdupq_n_s32(kMaxOutput);
  for (; i <= size - 16; i += 16) {
    // Widen 16 uint8 values into four int32x4 registers.
    const uint8x16_t input_vec = vld1q_u8(input_data + i);
    const uint16x8_t first_half = vmovl_u8(vget_low_u8(input_vec));
    const uint16x8_t second_half = vmovl_u8(vget_high_u8(input_vec));
    int32x4x4_t input;
    input.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(first_half)));
    input.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(first_half)));
    input.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(second_half)));
    input.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(second_half)));
    // Subtract the input zero point.
    input.val[0] = vaddq_s32(input.val[0], input_zero_point_dup);
    input.val[1] = vaddq_s32(input.val[1], input_zero_point_dup);
    input.val[2] = vaddq_s32(input.val[2], input_zero_point_dup);
    input.val[3] = vaddq_s32(input.val[3], input_zero_point_dup);
    // Rescale to the output scale.
    int32x4x4_t result = MultiplyByQuantizedMultiplier4Rows(
        input, effective_scale_multiplier, effective_scale_shift);
    // Add the output zero point, then clamp to the uint8 range.
    result.val[0] = vaddq_s32(result.val[0], output_zero_point_dup);
    result.val[1] = vaddq_s32(result.val[1], output_zero_point_dup);
    result.val[2] = vaddq_s32(result.val[2], output_zero_point_dup);
    result.val[3] = vaddq_s32(result.val[3], output_zero_point_dup);
    result.val[0] =
        vmaxq_s32(vminq_s32(result.val[0], max_val_dup), min_val_dup);
    result.val[1] =
        vmaxq_s32(vminq_s32(result.val[1], max_val_dup), min_val_dup);
    result.val[2] =
        vmaxq_s32(vminq_s32(result.val[2], max_val_dup), min_val_dup);
    result.val[3] =
        vmaxq_s32(vminq_s32(result.val[3], max_val_dup), min_val_dup);
    // Reinterpret as unsigned (values are already clamped to [0, 255] above)
    // so unsigned saturating narrowing can be used.
    const uint32x4_t result_val_1_unsigned =
        vreinterpretq_u32_s32(result.val[0]);
    const uint32x4_t result_val_2_unsigned =
        vreinterpretq_u32_s32(result.val[1]);
    const uint32x4_t result_val_3_unsigned =
        vreinterpretq_u32_s32(result.val[2]);
    const uint32x4_t result_val_4_unsigned =
        vreinterpretq_u32_s32(result.val[3]);
    // Narrow back down to 16 uint8 values and store.
    const uint16x4_t narrowed_val_1 = vqmovn_u32(result_val_1_unsigned);
    const uint16x4_t narrowed_val_2 = vqmovn_u32(result_val_2_unsigned);
    const uint16x4_t narrowed_val_3 = vqmovn_u32(result_val_3_unsigned);
    const uint16x4_t narrowed_val_4 = vqmovn_u32(result_val_4_unsigned);
    const uint16x8_t output_first_half =
        vcombine_u16(narrowed_val_1, narrowed_val_2);
    const uint16x8_t output_second_half =
        vcombine_u16(narrowed_val_3, narrowed_val_4);
    const uint8x8_t narrowed_first_half = vqmovn_u16(output_first_half);
    const uint8x8_t narrowed_second_half = vqmovn_u16(output_second_half);
    const uint8x16_t narrowed_result =
        vcombine_u8(narrowed_first_half, narrowed_second_half);
    vst1q_u8(output_data + i, narrowed_result);
  }
#endif
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < size; ++i) {
    const int32_t input = input_data[i] - input_zeropoint;
    const int32_t output =
        MultiplyByQuantizedMultiplier(input, effective_scale_multiplier,
                                      effective_scale_shift) +
        output_zeropoint;
    const int32_t clamped_output =
        std::max(std::min(output, kMaxOutput), kMinOutput);
    output_data[i] = static_cast<uint8_t>(clamped_output);
  }
}
// Float HardSwish, applied elementwise:
//   out = in * min(6, max(0, in + 3)) * (1/6)
// NEON paths process 16 then 4 elements per iteration; the scalar loop at
// the bottom handles the remainder (and everything without NEON).
inline void HardSwish(const RuntimeShape& input_shape, const float* input_data,
                      const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("HardSwish/Float");
  auto size = MatchingFlatSize(input_shape, output_shape);
  int i = 0;
#ifdef USE_NEON
  const float32x4_t zero = vdupq_n_f32(0.0f);
  const float32x4_t three = vdupq_n_f32(3.0f);
  const float32x4_t six = vdupq_n_f32(6.0f);
  const float32x4_t one_sixth = vdupq_n_f32(1.0f / 6.0f);
  for (; i <= size - 16; i += 16) {
    // 4x partially unrolled version of the loop below. Refer to its comments.
    const float32x4_t in_0 = vld1q_f32(input_data + i + 0);
    const float32x4_t in_1 = vld1q_f32(input_data + i + 4);
    const float32x4_t in_2 = vld1q_f32(input_data + i + 8);
    const float32x4_t in_3 = vld1q_f32(input_data + i + 12);
    const float32x4_t in_scaled_0 = vmulq_f32(in_0, one_sixth);
    const float32x4_t in_scaled_1 = vmulq_f32(in_1, one_sixth);
    const float32x4_t in_scaled_2 = vmulq_f32(in_2, one_sixth);
    const float32x4_t in_scaled_3 = vmulq_f32(in_3, one_sixth);
    const float32x4_t in_reluish_0 =
        vminq_f32(six, vmaxq_f32(zero, vaddq_f32(in_0, three)));
    const float32x4_t in_reluish_1 =
        vminq_f32(six, vmaxq_f32(zero, vaddq_f32(in_1, three)));
    const float32x4_t in_reluish_2 =
        vminq_f32(six, vmaxq_f32(zero, vaddq_f32(in_2, three)));
    const float32x4_t in_reluish_3 =
        vminq_f32(six, vmaxq_f32(zero, vaddq_f32(in_3, three)));
    const float32x4_t product_0 = vmulq_f32(in_scaled_0, in_reluish_0);
    const float32x4_t product_1 = vmulq_f32(in_scaled_1, in_reluish_1);
    const float32x4_t product_2 = vmulq_f32(in_scaled_2, in_reluish_2);
    const float32x4_t product_3 = vmulq_f32(in_scaled_3, in_reluish_3);
    vst1q_f32(output_data + i + 0, product_0);
    vst1q_f32(output_data + i + 4, product_1);
    vst1q_f32(output_data + i + 8, product_2);
    vst1q_f32(output_data + i + 12, product_3);
  }
  for (; i <= size - 4; i += 4) {
    // The expression to be computed is:
    //   out = one_sixth * in * min(six, max(zero, (in + three)))
    // We structure the AST to have two roughly balanced, independent branches:
    //  - Multiplication: in_scaled = one_sixth * in.
    //  - Addition and clamping: in_reluish = min(six, max(zero, (in + three))).
    // Then the remaining multiplication at the root of the tree.
    const float32x4_t in = vld1q_f32(input_data + i);
    const float32x4_t in_scaled = vmulq_f32(in, one_sixth);
    const float32x4_t in_reluish =
        vminq_f32(six, vmaxq_f32(zero, vaddq_f32(in, three)));
    const float32x4_t product = vmulq_f32(in_scaled, in_reluish);
    vst1q_f32(output_data + i, product);
  }
#endif
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < size; i++) {
    const float in = input_data[i];
    output_data[i] =
        in * std::min(6.0f, std::max(0.0f, in + 3.0f)) * (1.0f / 6.0f);
  }
}
#ifdef USE_NEON
// Saturating-narrows eight int16 lanes to uint8 and writes them to `dst`.
// Negative lanes clamp to 0; lanes above 255 clamp to 255.
inline void SaturateAndStore(int16x8_t src, std::uint8_t* dst) {
  vst1_u8(dst, vqmovun_s16(src));
}
// Saturating-narrows eight int16 lanes to int8 and stores them to `dst`.
inline void SaturateAndStore(int16x8_t src, std::int8_t* dst) {
  // Narrow values down to 8 bit signed, saturating. (vqmovn_s16 is the
  // signed counterpart of the vqmovun_s16 used in the uint8 overload.)
  int8x8_t res8 = vqmovn_s16(src);
  // Store results to destination.
  vst1_s8(dst, res8);
}
#endif
// Quantized HardSwish for 8-bit data (T is the quantized storage type of
// input and output). Fixed-point implementation of
//   out = in * relu6(in + 3) / 6
// using the precomputed multipliers/exponents in `params`. The NEON paths
// process 32 then 8 elements per iteration; leftovers are delegated to
// reference_ops::HardSwish, which documents the fixed-point derivation.
template <typename T>
inline void HardSwish(const HardSwishParams& params,
                      const RuntimeShape& input_shape, const T* input_data,
                      const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("HardSwish/Quantized");
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int i = 0;
  // This code heavily uses NEON saturating left shifts (vqshl*) with shift
  // amounts that can be zero, in which case we rely on the correct behavior
  // of a left shift by zero returning just its first operand unmodified.
  // Unfortunately, the Intel arm_neon_sse.h implementation of vqshl* is
  // buggy in the case of zero shift amounts, see b/137199585. That is why
  // this NEON code path is restricted to true ARM NEON, excluding
  // arm_neon_sse.h. Anyway, the arm_neon_sse.h implementation of saturating
  // left shifts is slow scalar code, so there may not be much benefit in
  // running that over just plain reference code.
  //
  // TODO(b/137199585): revisit when this is fixed.
#ifdef __ARM_NEON
  // The positive part of the reluish multiplier exponent is applied in two
  // saturating shifts ((exponent - 1) then the last bit); the negative part
  // is applied as a single rounding right shift.
  const int16x8_t positive_reluish_multiplier_exponent_minus_one =
      vdupq_n_s16(std::max(0, params.reluish_multiplier_exponent - 1));
  const int16x8_t positive_reluish_multiplier_exponent_last_bit =
      vdupq_n_s16(params.reluish_multiplier_exponent > 0 ? 1 : 0);
  const int16x8_t negative_reluish_multiplier_exponent =
      vdupq_n_s16(std::min(0, params.reluish_multiplier_exponent));
  const int16x8_t constant_32767 = vdupq_n_s16(32767);
  const int16x8_t output_multiplier_exponent =
      vdupq_n_s16(params.output_multiplier_exponent);
  const int16x8_t output_zero_point = vdupq_n_s16(params.output_zero_point);
  // 4x unrolled version of the below NEON loop. Read that first.
  for (; i <= flat_size - 32; i += 32) {
    using cpu_backend_gemm::detail::Load16AndSubtractZeroPoint;
    const int16x8x2_t input_value_0_1 =
        Load16AndSubtractZeroPoint(input_data + i, params.input_zero_point);
    const int16x8x2_t input_value_2_3 = Load16AndSubtractZeroPoint(
        input_data + i + 16, params.input_zero_point);
    const int16x8_t input_value_on_hires_input_scale_0 =
        vshlq_n_s16(input_value_0_1.val[0], 7);
    const int16x8_t input_value_on_hires_input_scale_1 =
        vshlq_n_s16(input_value_0_1.val[1], 7);
    const int16x8_t input_value_on_hires_input_scale_2 =
        vshlq_n_s16(input_value_2_3.val[0], 7);
    const int16x8_t input_value_on_hires_input_scale_3 =
        vshlq_n_s16(input_value_2_3.val[1], 7);
    const int16x8_t input_value_on_preshift_output_scale_0 =
        vqrdmulhq_n_s16(input_value_on_hires_input_scale_0,
                        params.output_multiplier_fixedpoint_int16);
    const int16x8_t input_value_on_preshift_output_scale_1 =
        vqrdmulhq_n_s16(input_value_on_hires_input_scale_1,
                        params.output_multiplier_fixedpoint_int16);
    const int16x8_t input_value_on_preshift_output_scale_2 =
        vqrdmulhq_n_s16(input_value_on_hires_input_scale_2,
                        params.output_multiplier_fixedpoint_int16);
    const int16x8_t input_value_on_preshift_output_scale_3 =
        vqrdmulhq_n_s16(input_value_on_hires_input_scale_3,
                        params.output_multiplier_fixedpoint_int16);
    int16x8_t reluish_value_0 = input_value_on_hires_input_scale_0;
    int16x8_t reluish_value_1 = input_value_on_hires_input_scale_1;
    int16x8_t reluish_value_2 = input_value_on_hires_input_scale_2;
    int16x8_t reluish_value_3 = input_value_on_hires_input_scale_3;
    reluish_value_0 = vqshlq_s16(
        reluish_value_0, positive_reluish_multiplier_exponent_minus_one);
    reluish_value_1 = vqshlq_s16(
        reluish_value_1, positive_reluish_multiplier_exponent_minus_one);
    reluish_value_2 = vqshlq_s16(
        reluish_value_2, positive_reluish_multiplier_exponent_minus_one);
    reluish_value_3 = vqshlq_s16(
        reluish_value_3, positive_reluish_multiplier_exponent_minus_one);
    reluish_value_0 = vqrdmulhq_n_s16(
        reluish_value_0, params.reluish_multiplier_fixedpoint_int16);
    reluish_value_1 = vqrdmulhq_n_s16(
        reluish_value_1, params.reluish_multiplier_fixedpoint_int16);
    reluish_value_2 = vqrdmulhq_n_s16(
        reluish_value_2, params.reluish_multiplier_fixedpoint_int16);
    reluish_value_3 = vqrdmulhq_n_s16(
        reluish_value_3, params.reluish_multiplier_fixedpoint_int16);
    reluish_value_0 = vqshlq_s16(reluish_value_0,
                                 positive_reluish_multiplier_exponent_last_bit);
    reluish_value_1 = vqshlq_s16(reluish_value_1,
                                 positive_reluish_multiplier_exponent_last_bit);
    reluish_value_2 = vqshlq_s16(reluish_value_2,
                                 positive_reluish_multiplier_exponent_last_bit);
    reluish_value_3 = vqshlq_s16(reluish_value_3,
                                 positive_reluish_multiplier_exponent_last_bit);
    reluish_value_0 =
        vrshlq_s16(reluish_value_0, negative_reluish_multiplier_exponent);
    reluish_value_1 =
        vrshlq_s16(reluish_value_1, negative_reluish_multiplier_exponent);
    reluish_value_2 =
        vrshlq_s16(reluish_value_2, negative_reluish_multiplier_exponent);
    reluish_value_3 =
        vrshlq_s16(reluish_value_3, negative_reluish_multiplier_exponent);
    reluish_value_0 = vrhaddq_s16(reluish_value_0, constant_32767);
    reluish_value_1 = vrhaddq_s16(reluish_value_1, constant_32767);
    reluish_value_2 = vrhaddq_s16(reluish_value_2, constant_32767);
    reluish_value_3 = vrhaddq_s16(reluish_value_3, constant_32767);
    const int16x8_t preshift_output_value_0 =
        vqdmulhq_s16(reluish_value_0, input_value_on_preshift_output_scale_0);
    const int16x8_t preshift_output_value_1 =
        vqdmulhq_s16(reluish_value_1, input_value_on_preshift_output_scale_1);
    const int16x8_t preshift_output_value_2 =
        vqdmulhq_s16(reluish_value_2, input_value_on_preshift_output_scale_2);
    const int16x8_t preshift_output_value_3 =
        vqdmulhq_s16(reluish_value_3, input_value_on_preshift_output_scale_3);
    int16x8_t output_value_0 =
        vrshlq_s16(preshift_output_value_0, output_multiplier_exponent);
    int16x8_t output_value_1 =
        vrshlq_s16(preshift_output_value_1, output_multiplier_exponent);
    int16x8_t output_value_2 =
        vrshlq_s16(preshift_output_value_2, output_multiplier_exponent);
    int16x8_t output_value_3 =
        vrshlq_s16(preshift_output_value_3, output_multiplier_exponent);
    output_value_0 = vaddq_s16(output_value_0, output_zero_point)
    output_value_1 = vaddq_s16(output_value_1, output_zero_point);
    output_value_2 = vaddq_s16(output_value_2, output_zero_point);
    output_value_3 = vaddq_s16(output_value_3, output_zero_point);
    SaturateAndStore(output_value_0, output_data + i);
    SaturateAndStore(output_value_1, output_data + i + 8);
    SaturateAndStore(output_value_2, output_data + i + 16);
    SaturateAndStore(output_value_3, output_data + i + 24);
  }
  // NEON version of reference_ops::HardSwish. Read that first.
  for (; i <= flat_size - 8; i += 8) {
    using cpu_backend_gemm::detail::Load8AndSubtractZeroPoint;
    const int16x8_t input_value =
        Load8AndSubtractZeroPoint(input_data + i, params.input_zero_point);
    // Left-shift by 7 to a higher-resolution fixed-point input scale.
    const int16x8_t input_value_on_hires_input_scale =
        vshlq_n_s16(input_value, 7);
    const int16x8_t input_value_on_preshift_output_scale =
        vqrdmulhq_n_s16(input_value_on_hires_input_scale,
                        params.output_multiplier_fixedpoint_int16);
    int16x8_t reluish_value = input_value_on_hires_input_scale;
    reluish_value = vqshlq_s16(reluish_value,
                               positive_reluish_multiplier_exponent_minus_one);
    reluish_value = vqrdmulhq_n_s16(reluish_value,
                                    params.reluish_multiplier_fixedpoint_int16);
    reluish_value = vqshlq_s16(reluish_value,
                               positive_reluish_multiplier_exponent_last_bit);
    reluish_value =
        vrshlq_s16(reluish_value, negative_reluish_multiplier_exponent);
    // Rounded halving-add with 32767 remaps the saturated [-1, 1] fixed-point
    // value into the [0, 1] range.
    reluish_value = vrhaddq_s16(reluish_value, constant_32767);
    const int16x8_t preshift_output_value =
        vqdmulhq_s16(reluish_value, input_value_on_preshift_output_scale);
    int16x8_t output_value =
        vrshlq_s16(preshift_output_value, output_multiplier_exponent);
    output_value = vaddq_s16(output_value, output_zero_point);
    // Saturate to the 8-bit output type and store.
    SaturateAndStore(output_value, output_data + i);
  }
#endif
  // TODO(b/137208495): revisit when unit tests cover reference code.
  // Fall back to reference_ops::HardSwish. In general we have preferred
  // to duplicate such scalar code rather than call reference code to handle
  // leftovers, thinking that code duplication was not a big concern.
  // However, most of our unit tests happen to test only optimized code,
  // and the quantized HardSwish implementation is nontrivial enough that
  // I really want test coverage for the reference code.
  if (i < flat_size) {
    const RuntimeShape leftover_shape{flat_size - i};
    reference_ops::HardSwish(params, leftover_shape, input_data + i,
                             leftover_shape, output_data + i);
  }
}
// Computes output = base ^ exponent elementwise, for an integer exponent >= 1,
// via exponentiation by squaring built on the elementwise Mul op: roughly
// O(log(exponent)) multiplications instead of (exponent - 1).
// `params` supplies the activation clamping bounds used by Mul.
template <typename T>
inline void IntegerExponentPow(const ArithmeticParams& params,
                               const RuntimeShape& unextended_base_shape,
                               const T* base_data, const int exponent,
                               const RuntimeShape& unextended_output_shape,
                               T* output_data) {
  TFLITE_DCHECK_GE(exponent, 1);
  if (exponent == 1) {
    // Base case: base^1 is just a copy of the base tensor.
    std::memcpy(output_data, base_data,
                unextended_base_shape.FlatSize() * sizeof(T));
    return;
  }
  // Recurse to obtain base^(exponent / 2) in output_data, then square it.
  IntegerExponentPow(params, unextended_base_shape, base_data, exponent / 2,
                     unextended_output_shape, output_data);
  Mul(params, unextended_base_shape, output_data, unextended_base_shape,
      output_data, unextended_output_shape, output_data);
  // Odd exponents need one extra factor of the base multiplied in.
  if (exponent % 2 == 1) {
    Mul(params, unextended_base_shape, base_data, unextended_base_shape,
        output_data, unextended_output_shape, output_data);
  }
}
// Broadcasting elementwise power. When the exponent tensor is a single
// scalar that is numerically a positive integer (within epsilon), dispatches
// to the fast exponentiation-by-squaring path (IntegerExponentPow);
// otherwise falls back to the slow reference broadcast implementation.
template <typename T>
inline void BroadcastPow4D(const RuntimeShape& unextended_input1_shape,
                           const T* input1_data,
                           const RuntimeShape& unextended_input2_shape,
                           const T* input2_data,
                           const RuntimeShape& unextended_output_shape,
                           T* output_data) {
  ruy::profiler::ScopeLabel label("PowBroadcast");
  if (unextended_input2_shape.FlatSize() == 1) {
    static const float epsilon = 1e-5;
    const T scalar_exponent = input2_data[0];
    const int rounded_exponent = static_cast<int>(std::round(scalar_exponent));
    const bool is_positive_integer_exponent =
        (std::abs(input2_data[0] - rounded_exponent) < epsilon) &&
        (rounded_exponent >= 1);
    if (is_positive_integer_exponent) {
      // Widest possible activation bounds, so Mul performs no clamping.
      ArithmeticParams params;
      if (std::is_same<T, float>::value) {
        params.float_activation_max = std::numeric_limits<float>::max();
        params.float_activation_min = std::numeric_limits<float>::lowest();
      } else if (std::is_same<T, int>::value) {
        params.quantized_activation_max = std::numeric_limits<int>::max();
        params.quantized_activation_min = std::numeric_limits<int>::lowest();
      }
      IntegerExponentPow(params, unextended_input1_shape, input1_data,
                         rounded_exponent, unextended_output_shape,
                         output_data);
      return;
    }
    (void)scalar_exponent;
  }
  reference_ops::BroadcastPow4DSlow(unextended_input1_shape, input1_data,
                                    unextended_input2_shape, input2_data,
                                    unextended_output_shape, output_data);
}
#ifdef USE_NEON
// Dequantizes four int32 lanes into floats:
//   *output = input * scale_dup + zero_times_scale_dup
// where zero_times_scale_dup is expected to hold the precomputed
// -zero_point * scale term (see the Dequantize callers below). Uses a fused
// multiply-add when the target supports FMA.
inline void ScaleWithNewZeroPoint(const int32x4_t input,
                                  const float32x4_t scale_dup,
                                  const float32x4_t zero_times_scale_dup,
                                  float32x4_t* output) {
#ifdef __ARM_FEATURE_FMA
  *output = vfmaq_f32(zero_times_scale_dup, vcvtq_f32_s32(input), scale_dup);
#else
  *output = vaddq_f32(vmulq_f32(vcvtq_f32_s32(input), scale_dup),
                      zero_times_scale_dup);
#endif
}
#endif // USE_NEON
// Dequantizes uint8 data: output = scale * (input - zero_point).
// The NEON path folds -zero_point * scale into a constant so each lane is a
// single multiply-add, handling 8 elements per iteration; the scalar loop
// covers the remainder (and everything when NEON is unavailable).
inline void Dequantize(const tflite::DequantizationParams& op_params,
                       const RuntimeShape& input_shape,
                       const uint8_t* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Dequantize/Uint8");
  const int32 zero_point = op_params.zero_point;
  const double scale = op_params.scale;
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int i = 0;
#ifdef USE_NEON
  const float32x4_t scale_dup = vdupq_n_f32(static_cast<float>(scale));
  const float32x4_t zero_times_scale_dup =
      vdupq_n_f32(static_cast<float>(-zero_point * scale));
  for (; i <= flat_size - 8; i += 8) {
    // Widen 8 uint8 values to two int32x4 registers, then scale to float.
    const uint8x8_t input_u8 = vld1_u8(input_data + i);
    const uint16x8_t input_u16 = vmovl_u8(input_u8);
    const int16x8_t input_s16 = vreinterpretq_s16_u16(input_u16);
    const int16x4_t input_s16_low = vget_low_s16(input_s16);
    const int16x4_t input_s16_high = vget_high_s16(input_s16);
    const int32x4_t val_low = vmovl_s16(input_s16_low);
    const int32x4_t val_high = vmovl_s16(input_s16_high);
    float32x4_t result_low, result_high;
    ScaleWithNewZeroPoint(val_low, scale_dup, zero_times_scale_dup,
                          &result_low);
    ScaleWithNewZeroPoint(val_high, scale_dup, zero_times_scale_dup,
                          &result_high);
    vst1q_f32(output_data + i, result_low);
    vst1q_f32(output_data + i + 4, result_high);
  }
#endif  // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const int32 val = input_data[i];
    const float result = static_cast<float>(scale * (val - zero_point));
    output_data[i] = result;
  }
}
// Dequantizes int8 data: output = scale * (input - zero_point).
// The NEON path folds -zero_point * scale into a constant so each lane is a
// single multiply-add, handling 8 elements per iteration; the scalar loop
// covers the remainder (and everything when NEON is unavailable).
inline void Dequantize(const tflite::DequantizationParams& op_params,
                       const RuntimeShape& input_shape,
                       const int8_t* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Dequantize/Int8");
  const int32 zero_point = op_params.zero_point;
  const double scale = op_params.scale;
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int i = 0;
#ifdef USE_NEON
  const float32x4_t scale_dup = vdupq_n_f32(static_cast<float>(scale));
  const float32x4_t zero_times_scale_dup =
      vdupq_n_f32(static_cast<float>(-zero_point * scale));
  for (; i <= flat_size - 8; i += 8) {
    // Sign-extend 8 int8 values to two int32x4 registers, then scale.
    const int8x8_t input_s8 = vld1_s8(input_data + i);
    const int16x8_t input_s16 = vmovl_s8(input_s8);
    const int16x4_t input_s16_low = vget_low_s16(input_s16);
    const int16x4_t input_s16_high = vget_high_s16(input_s16);
    const int32x4_t val_low = vmovl_s16(input_s16_low);
    const int32x4_t val_high = vmovl_s16(input_s16_high);
    float32x4_t result_low, result_high;
    ScaleWithNewZeroPoint(val_low, scale_dup, zero_times_scale_dup,
                          &result_low);
    ScaleWithNewZeroPoint(val_high, scale_dup, zero_times_scale_dup,
                          &result_high);
    vst1q_f32(output_data + i, result_low);
    vst1q_f32(output_data + i + 4, result_high);
  }
#endif  // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const int32 val = input_data[i];
    const float result = static_cast<float>(scale * (val - zero_point));
    output_data[i] = result;
  }
}
// Dequantizes int16 data: output = scale * (input - zero_point).
// The NEON path folds -zero_point * scale into a constant so each lane is a
// single multiply-add, handling 8 elements per iteration; the scalar loop
// covers the remainder (and everything when NEON is unavailable).
inline void Dequantize(const tflite::DequantizationParams& op_params,
                       const RuntimeShape& input_shape,
                       const int16_t* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("Dequantize/Int16");
  const int32 zero_point = op_params.zero_point;
  const double scale = op_params.scale;
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  int i = 0;
#ifdef USE_NEON
  const float32x4_t scale_dup = vdupq_n_f32(static_cast<float>(scale))
  const float32x4_t zero_times_scale_dup =
      vdupq_n_f32(static_cast<float>(-zero_point * scale));
  for (; i <= flat_size - 8; i += 8) {
    // Sign-extend 8 int16 values to two int32x4 registers, then scale.
    const int16x4_t input_s16_low = vld1_s16(input_data + i);
    const int16x4_t input_s16_high = vld1_s16(input_data + i + 4);
    const int32x4_t val_low = vmovl_s16(input_s16_low);
    const int32x4_t val_high = vmovl_s16(input_s16_high);
    float32x4_t result_low, result_high;
    ScaleWithNewZeroPoint(val_low, scale_dup, zero_times_scale_dup,
                          &result_low);
    ScaleWithNewZeroPoint(val_high, scale_dup, zero_times_scale_dup,
                          &result_high);
    vst1q_f32(output_data + i, result_low);
    vst1q_f32(output_data + i + 4, result_high);
  }
#endif  // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const int32 val = input_data[i];
    const float result = static_cast<float>(scale * (val - zero_point));
    output_data[i] = result;
  }
}
// Dequantizes fp16 input to fp32 by delegating to the reference
// implementation; there is no optimized path for half precision here.
inline void Dequantize(const RuntimeShape& input_shape,
                       const Eigen::half* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  reference_ops::Dequantize(input_shape, input_data, output_shape, output_data);
}
// Generic AffineQuantize: delegates to the reference implementation for any
// output type without a specialized optimized path (specializations for
// int8/uint8/int16 follow below).
template <typename T>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape, T* output_data) {
  reference_ops::AffineQuantize(op_params, input_shape, input_data,
                                output_shape, output_data);
}
// Quantizes float data to int8:
//   output = clamp(round(input / scale) + zero_point, [int8 min, int8 max])
// The NEON path handles 8 elements per iteration; the scalar loop at the
// bottom covers the remainder (and everything when NEON is unavailable).
template <>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape,
                           int8_t* output_data) {
  ruy::profiler::ScopeLabel label("Quantize/Int8");
  const int32 zero_point = op_params.zero_point;
  const double scale = static_cast<double>(op_params.scale);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static constexpr int32 min_val = std::numeric_limits<int8_t>::min();
  static constexpr int32 max_val = std::numeric_limits<int8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Multiply by the reciprocal of scale instead of dividing.
  const float32x4_t reverse_scale_dup = vdupq_n_f32(1.0f / scale);
  const int32x4_t zero_point_dup = vdupq_n_s32(zero_point);
  const int32x4_t min_val_dup = vdupq_n_s32(min_val);
  const int32x4_t max_val_dup = vdupq_n_s32(max_val);
  for (; i <= flat_size - 8; i += 8) {
    const float* src_data_ptr = input_data + i;
    float32x4_t input_val_0 = vld1q_f32(src_data_ptr);
    float32x4_t input_val_1 = vld1q_f32(src_data_ptr + 4);
    input_val_0 = vmulq_f32(input_val_0, reverse_scale_dup);
    input_val_1 = vmulq_f32(input_val_1, reverse_scale_dup);
    int32x4_t casted_val_0 = RoundToNearest(input_val_0);
    int32x4_t casted_val_1 = RoundToNearest(input_val_1);
    casted_val_0 = vaddq_s32(casted_val_0, zero_point_dup);
    casted_val_1 = vaddq_s32(casted_val_1, zero_point_dup);
    // Clamp the values to fit the target type's range.
    casted_val_0 = vmaxq_s32(casted_val_0, min_val_dup);
    casted_val_1 = vmaxq_s32(casted_val_1, min_val_dup);
    casted_val_0 = vminq_s32(casted_val_0, max_val_dup);
    casted_val_1 = vminq_s32(casted_val_1, max_val_dup);
    // Plain (non-saturating) narrowing is safe: values were clamped above.
    const int16x4_t narrowed_val_0 = vmovn_s32(casted_val_0);
    const int16x4_t narrowed_val_1 = vmovn_s32(casted_val_1);
    const int16x8_t combined_val = vcombine_s16(narrowed_val_0, narrowed_val_1);
    const int8x8_t combined_val_narrowed = vmovn_s16(combined_val);
    vst1_s8(output_data + i, combined_val_narrowed);
  }
#endif  // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const float val = input_data[i];
    const int32 unclamped =
        static_cast<int32>(TfLiteRound(val / scale)) + zero_point;
    const int32 clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[i] = clamped;
  }
}
// Quantizes float data to uint8:
//   output = clamp(round(input / scale) + zero_point, [0, 255])
// The NEON path handles 8 elements per iteration; the scalar loop at the
// bottom covers the remainder (and everything when NEON is unavailable).
template <>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape,
                           uint8_t* output_data) {
  ruy::profiler::ScopeLabel label("Quantize/Uint8");
  const int32 zero_point = op_params.zero_point;
  const double scale = static_cast<double>(op_params.scale);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static constexpr int32 min_val = std::numeric_limits<uint8_t>::min();
  static constexpr int32 max_val = std::numeric_limits<uint8_t>::max();
  int i = 0;
#ifdef USE_NEON
  // Multiply by the reciprocal of scale instead of dividing.
  const float32x4_t reverse_scale_dup = vdupq_n_f32(1.0f / scale);
  const int32x4_t zero_point_dup = vdupq_n_s32(zero_point);
  const int32x4_t min_val_dup = vdupq_n_s32(min_val);
  const int32x4_t max_val_dup = vdupq_n_s32(max_val);
  for (; i <= flat_size - 8; i += 8) {
    const float* src_data_ptr = input_data + i;
    float32x4_t input_val_0 = vld1q_f32(src_data_ptr);
    float32x4_t input_val_1 = vld1q_f32(src_data_ptr + 4);
    input_val_0 = vmulq_f32(input_val_0, reverse_scale_dup);
    input_val_1 = vmulq_f32(input_val_1, reverse_scale_dup);
    int32x4_t casted_val_0 = RoundToNearest(input_val_0);
    int32x4_t casted_val_1 = RoundToNearest(input_val_1);
    casted_val_0 = vaddq_s32(casted_val_0, zero_point_dup);
    casted_val_1 = vaddq_s32(casted_val_1, zero_point_dup);
    // Clamp the values to fit the target type's range.
    casted_val_0 = vmaxq_s32(casted_val_0, min_val_dup);
    casted_val_1 = vmaxq_s32(casted_val_1, min_val_dup);
    casted_val_0 = vminq_s32(casted_val_0, max_val_dup);
    casted_val_1 = vminq_s32(casted_val_1, max_val_dup);
    // Narrow to uint8; the final vmovn is safe since values were clamped.
    const uint16x4_t narrowed_val_0 = vqmovun_s32(casted_val_0);
    const uint16x4_t narrowed_val_1 = vqmovun_s32(casted_val_1);
    const uint16x8_t combined_val =
        vcombine_u16(narrowed_val_0, narrowed_val_1);
    const uint8x8_t combined_val_narrowed = vmovn_u16(combined_val);
    vst1_u8(output_data + i, combined_val_narrowed);
  }
#endif  // NEON
  // Scalar tail (also the full path when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const float val = input_data[i];
    const int32 unclamped =
        static_cast<int32>(TfLiteRound(val / scale)) + zero_point;
    const int32 clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[i] = clamped;
  }
}
// Quantizes float values to int16 with the affine scheme:
//   q = clamp(round(x / scale) + zero_point, -32768, 32767).
// The NEON path handles 8 floats per iteration; the scalar loop processes the
// leftover tail (and the whole tensor when NEON is unavailable).
template <>
inline void AffineQuantize(const tflite::QuantizationParams& op_params,
                           const RuntimeShape& input_shape,
                           const float* input_data,
                           const RuntimeShape& output_shape,
                           int16_t* output_data) {
  ruy::profiler::ScopeLabel label("Quantize/Int16");
  const int32 zero_point = op_params.zero_point;
  // Widen to double so the scalar division below keeps full precision.
  const double scale = static_cast<double>(op_params.scale);
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  static constexpr int32 min_val = std::numeric_limits<int16_t>::min();
  static constexpr int32 max_val = std::numeric_limits<int16_t>::max();

  int i = 0;
#ifdef USE_NEON
  // Multiply by the reciprocal of scale instead of dividing inside the loop.
  const float32x4_t reverse_scale_dup = vdupq_n_f32(1.0f / scale);
  const int32x4_t zero_point_dup = vdupq_n_s32(zero_point);
  const int32x4_t min_val_dup = vdupq_n_s32(min_val);
  const int32x4_t max_val_dup = vdupq_n_s32(max_val);
  for (; i <= flat_size - 8; i += 8) {
    const float* src_data_ptr = input_data + i;
    float32x4_t input_val_0 = vld1q_f32(src_data_ptr);
    float32x4_t input_val_1 = vld1q_f32(src_data_ptr + 4);

    input_val_0 = vmulq_f32(input_val_0, reverse_scale_dup);
    input_val_1 = vmulq_f32(input_val_1, reverse_scale_dup);

    int32x4_t casted_val_0 = RoundToNearest(input_val_0);
    int32x4_t casted_val_1 = RoundToNearest(input_val_1);

    casted_val_0 = vaddq_s32(casted_val_0, zero_point_dup);
    casted_val_1 = vaddq_s32(casted_val_1, zero_point_dup);

    // Clamp the values to fit the target type's range.
    casted_val_0 = vmaxq_s32(casted_val_0, min_val_dup);
    casted_val_1 = vmaxq_s32(casted_val_1, min_val_dup);
    casted_val_0 = vminq_s32(casted_val_0, max_val_dup);
    casted_val_1 = vminq_s32(casted_val_1, max_val_dup);

    // Narrow int32 -> int16 (values are already clamped to int16 range above)
    // and store two groups of 4 results.
    const int16x4_t narrowed_val_0 = vmovn_s32(casted_val_0);
    const int16x4_t narrowed_val_1 = vmovn_s32(casted_val_1);
    vst1_s16(output_data + i, narrowed_val_0);
    vst1_s16(output_data + i + 4, narrowed_val_1);
  }
#endif  // NEON
  // Scalar tail: same round/offset/clamp as the vector path.
  for (; i < flat_size; ++i) {
    const float val = input_data[i];
    const int32 unclamped =
        static_cast<int32>(TfLiteRound(val / scale)) + zero_point;
    const int32 clamped = std::min(std::max(unclamped, min_val), max_val);
    output_data[i] = clamped;
  }
}
// TODO(b/139252020): Replace GEMMLOWP_NEON with USE_NEON when the bug is fixed.
// The converted versions of gemmlowp::tanh and gemmlowp::logistic, done by
// arm_sse_2_neon.h, produce incorrect results with int16x8_t data types.
#ifdef GEMMLOWP_NEON
// Rescales four int16x8 vectors of centered input values: shift each lane left
// by `input_left_shift`, then apply a saturating rounding doubling high
// multiply by `input_multiplier` (vqrdmulhq).
inline int16x8x4_t SaturatingRounding(
    int16x8_t input_val_0, int16x8_t input_val_1, int16x8_t input_val_2,
    int16x8_t input_val_3, int input_left_shift, int input_multiplier) {
  // This performs what is expressed in the scalar code as
  // const int16 input_val_rescaled = SaturatingRoundingDoublingHighMul(
  //     static_cast<int16>(input_val_centered * (1 << input_left_shift)),
  //     static_cast<int16>(input_multiplier));
  const int16x8_t left_shift_dup = vdupq_n_s16(input_left_shift);
  const int16x8_t input_val_shifted_0 = vshlq_s16(input_val_0, left_shift_dup);
  const int16x8_t input_val_shifted_1 = vshlq_s16(input_val_1, left_shift_dup);
  const int16x8_t input_val_shifted_2 = vshlq_s16(input_val_2, left_shift_dup);
  const int16x8_t input_val_shifted_3 = vshlq_s16(input_val_3, left_shift_dup);
  int16x8x4_t result;
  result.val[0] = vqrdmulhq_n_s16(input_val_shifted_0, input_multiplier);
  result.val[1] = vqrdmulhq_n_s16(input_val_shifted_1, input_multiplier);
  result.val[2] = vqrdmulhq_n_s16(input_val_shifted_2, input_multiplier);
  result.val[3] = vqrdmulhq_n_s16(input_val_shifted_3, input_multiplier);
  return result;
}
// 4-bit fixed point is enough for tanh since tanh(16) is almost same with one,
// considering 7 digits under zero.
// Applies gemmlowp::logistic to four int16x8 vectors interpreted as Q4.11
// fixed point. 4 integer bits are enough here because the logistic saturates
// well within [-16, 16].
inline int16x8x4_t FixedPoint4Logistic(int16x8x4_t input_val) {
  // Invoke gemmlowp::logistic on FixedPoint wrapping int16x8_t
  using FixedPoint4 = gemmlowp::FixedPoint<int16x8_t, 4>;
  using FixedPoint0 = gemmlowp::FixedPoint<int16x8_t, 0>;
  const FixedPoint4 input_val_f4_0 = FixedPoint4::FromRaw(input_val.val[0]);
  const FixedPoint4 input_val_f4_1 = FixedPoint4::FromRaw(input_val.val[1]);
  const FixedPoint4 input_val_f4_2 = FixedPoint4::FromRaw(input_val.val[2]);
  const FixedPoint4 input_val_f4_3 = FixedPoint4::FromRaw(input_val.val[3]);

  // TODO(b/134622898) Implement a low accuracy version of logistic. In this
  // method, gemmlowp::logistic spends about 80% of the execution times. The
  // current implementation is roughly 12-bit accurate in the 16-bit fixed
  // point case. Until reaching the error bounds, there is room for
  // improvement.
  const FixedPoint0 output_val_f0_0 = gemmlowp::logistic(input_val_f4_0);
  const FixedPoint0 output_val_f0_1 = gemmlowp::logistic(input_val_f4_1);
  const FixedPoint0 output_val_f0_2 = gemmlowp::logistic(input_val_f4_2);
  const FixedPoint0 output_val_f0_3 = gemmlowp::logistic(input_val_f4_3);

  // Divide by 2^7 as in the scalar code, with round-to-nearest (vrshrq).
  int16x8x4_t result;
  result.val[0] = vrshrq_n_s16(output_val_f0_0.raw(), 7);
  result.val[1] = vrshrq_n_s16(output_val_f0_1.raw(), 7);
  result.val[2] = vrshrq_n_s16(output_val_f0_2.raw(), 7);
  result.val[3] = vrshrq_n_s16(output_val_f0_3.raw(), 7);
  return result;
}
// 4-bit fixed point is enough for tanh since tanh(16) is almost same with one,
// considering 11 digits under zero at least.
// Applies gemmlowp::tanh to four int16x8 vectors interpreted as Q4.11 fixed
// point. 4 integer bits are enough for tanh since tanh(16) is almost the same
// as one, considering at least 11 digits below zero.
inline int16x8x4_t FixedPoint4Tanh(int16x8x4_t input_val) {
  // Invoke gemmlowp::tanh on FixedPoint wrapping int16x8_t
  using FixedPoint4 = gemmlowp::FixedPoint<int16x8_t, 4>;
  using FixedPoint0 = gemmlowp::FixedPoint<int16x8_t, 0>;
  const FixedPoint4 input_val_f4_0 = FixedPoint4::FromRaw(input_val.val[0]);
  const FixedPoint4 input_val_f4_1 = FixedPoint4::FromRaw(input_val.val[1]);
  const FixedPoint4 input_val_f4_2 = FixedPoint4::FromRaw(input_val.val[2]);
  const FixedPoint4 input_val_f4_3 = FixedPoint4::FromRaw(input_val.val[3]);

  // TODO(b/134622898) Implement a low accuracy version of tanh. In this
  // method, gemmlowp::tanh spends about 80% of the execution times. The
  // current implementation is roughly 12-bit accurate in the 16-bit fixed
  // point case. Until reaching the error bounds, there is room for
  // improvement.
  const FixedPoint0 output_val_f0_0 = gemmlowp::tanh(input_val_f4_0);
  const FixedPoint0 output_val_f0_1 = gemmlowp::tanh(input_val_f4_1);
  const FixedPoint0 output_val_f0_2 = gemmlowp::tanh(input_val_f4_2);
  const FixedPoint0 output_val_f0_3 = gemmlowp::tanh(input_val_f4_3);

  // Divide by 2^8 as in the scalar code, with round-to-nearest (vrshrq).
  int16x8x4_t result;
  result.val[0] = vrshrq_n_s16(output_val_f0_0.raw(), 8);
  result.val[1] = vrshrq_n_s16(output_val_f0_1.raw(), 8);
  result.val[2] = vrshrq_n_s16(output_val_f0_2.raw(), 8);
  result.val[3] = vrshrq_n_s16(output_val_f0_3.raw(), 8);
  return result;
}
// Builds per-byte clamping masks for the unsigned (uint8 output) case.
// result.val[0] has 0xFF bytes where input >= -range_radius (value kept),
// result.val[1] has 0xFF bytes where input > range_radius (forced to 255).
// The vshrn_n_u16(..., 8) narrows each 16-bit all-ones/all-zeros comparison
// result to one mask byte.
inline uint8x16x2_t CalculateUnsignedClampingWithRangeBitMasks(
    int16x8x2_t input_val, int16x8_t range_radius_dup,
    int16x8_t neg_range_radius_dup) {
  const uint16x8_t mask_rightclamp_0 =
      vcgtq_s16(input_val.val[0], range_radius_dup);
  const uint16x8_t mask_rightclamp_1 =
      vcgtq_s16(input_val.val[1], range_radius_dup);

  const uint16x8_t mask_leftclamp_0 =
      vcgeq_s16(input_val.val[0], neg_range_radius_dup);
  const uint16x8_t mask_leftclamp_1 =
      vcgeq_s16(input_val.val[1], neg_range_radius_dup);

  uint8x16x2_t result;
  result.val[0] = vcombine_u8(vshrn_n_u16(mask_leftclamp_0, 8),
                              vshrn_n_u16(mask_leftclamp_1, 8));
  result.val[1] = vcombine_u8(vshrn_n_u16(mask_rightclamp_0, 8),
                              vshrn_n_u16(mask_rightclamp_1, 8));
  return result;
}
// Builds per-byte clamping masks for the signed (int8 output) case.
// result.val[0] has 0xFF bytes where input < -range_radius (forced to -128),
// result.val[1] has 0xFF bytes where input > range_radius (forced to 127).
// Note the left mask uses vclt (strictly below), unlike the unsigned variant
// which marks in-range lanes with vcge.
inline uint8x16x2_t CalculateSignedClampingWithRangeBitMasks(
    int16x8x2_t input_val, int16x8_t range_radius_dup,
    int16x8_t neg_range_radius_dup) {
  const uint16x8_t mask_rightclamp_0 =
      vcgtq_s16(input_val.val[0], range_radius_dup);
  const uint16x8_t mask_rightclamp_1 =
      vcgtq_s16(input_val.val[1], range_radius_dup);

  const uint16x8_t mask_leftclamp_0 =
      vcltq_s16(input_val.val[0], neg_range_radius_dup);
  const uint16x8_t mask_leftclamp_1 =
      vcltq_s16(input_val.val[1], neg_range_radius_dup);

  uint8x16x2_t result;
  result.val[0] = vcombine_u8(vshrn_n_u16(mask_leftclamp_0, 8),
                              vshrn_n_u16(mask_leftclamp_1, 8));
  result.val[1] = vcombine_u8(vshrn_n_u16(mask_rightclamp_0, 8),
                              vshrn_n_u16(mask_rightclamp_1, 8));
  return result;
}
// Applies the unsigned clamping masks and stores 16 uint8 results:
// OR with the right-clamp mask forces 255 where input exceeded the range
// radius; AND with the left-clamp mask forces 0 where it fell below.
inline void ClampWithRangeAndStore(uint8_t* output_dst, uint8x16_t input_val,
                                   uint8x16x2_t masks_clamp) {
  // Store back to memory
  vst1q_u8(output_dst, vandq_u8(vorrq_u8(input_val, masks_clamp.val[1]),
                                masks_clamp.val[0]));
}
// Applies the signed clamping masks and stores 16 int8 results: lanes flagged
// by masks_clamp.val[1] are forced to 127, lanes flagged by
// masks_clamp.val[0] are forced to -128, all others keep input_val (vbslq
// selects per bit between the two operands).
inline void ClampWithRangeAndStore(int8_t* output_dst, int8x16_t input_val,
                                   uint8x16x2_t masks_clamp) {
  static const int8x16_t max_dup = vdupq_n_s8(127);
  static const int8x16_t min_dup = vdupq_n_s8(-128);
  // Store back to memory
  vst1q_s8(output_dst,
           vbslq_s8(masks_clamp.val[1], max_dup,
                    vbslq_s8(masks_clamp.val[0], min_dup, input_val)));
}
#endif // GEMMLOWP_NEON
// Computes tanh on uint8-quantized data using 16-bit fixed-point arithmetic.
// The NEON path processes 32 values per iteration; a scalar loop handles the
// tail. Output zero point is fixed at 128 (tanh output spans [-1, 1]).
inline void Tanh16bitPrecision(const TanhParams& params,
                               const RuntimeShape& input_shape,
                               const uint8* input_data,
                               const RuntimeShape& output_shape,
                               uint8* output_data) {
  // Note that this is almost the exact same code as in Logistic().
  ruy::profiler::ScopeLabel label("Tanh/Uint8");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int16 input_multiplier = static_cast<int16>(params.input_multiplier);
  const int16 input_left_shift = static_cast<int16>(params.input_left_shift);
  const int size = MatchingFlatSize(input_shape, output_shape);

  int c = 0;
  int16_t output_zero_point = 128;

// TODO(b/139252020): Replace GEMMLOWP_NEON with USE_NEON when the bug is fixed.
// The converted versions of gemmlowp::tanh and gemmlowp::logistic, done by
// arm_sse_2_neon.h, produce incorrect results with int16x8_t data types.
#ifdef GEMMLOWP_NEON
  const int16x8_t range_radius_dup = vdupq_n_s16(input_range_radius);
  const int16x8_t neg_range_radius_dup = vdupq_n_s16(-input_range_radius);
  const int16x8_t output_zero_point_s16 = vdupq_n_s16(output_zero_point);

  // Handle 32 values at a time
  for (; c <= size - 32; c += 32) {
    // Read input uint8 values, cast to int16 and subtract input_zero_point
    using cpu_backend_gemm::detail::Load16AndSubtractZeroPoint;
    const int16x8x2_t input_val_centered_0_1 =
        Load16AndSubtractZeroPoint(input_data + c, input_zero_point);
    const int16x8x2_t input_val_centered_2_3 =
        Load16AndSubtractZeroPoint(input_data + c + 16, input_zero_point);

    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = 0;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 255;
    //   } else {
    //     ...
    uint8x16x2_t masks_clamp_0_1 = CalculateUnsignedClampingWithRangeBitMasks(
        input_val_centered_0_1, range_radius_dup, neg_range_radius_dup);
    uint8x16x2_t masks_clamp_2_3 = CalculateUnsignedClampingWithRangeBitMasks(
        input_val_centered_2_3, range_radius_dup, neg_range_radius_dup);

    int16x8x4_t input_val_rescaled = SaturatingRounding(
        input_val_centered_0_1.val[0], input_val_centered_0_1.val[1],
        input_val_centered_2_3.val[0], input_val_centered_2_3.val[1],
        input_left_shift, input_multiplier);

    int16x8x4_t output_val_s16 = FixedPoint4Tanh(input_val_rescaled);

    // Add the output zero point
    output_val_s16.val[0] =
        vaddq_s16(output_val_s16.val[0], output_zero_point_s16);
    output_val_s16.val[1] =
        vaddq_s16(output_val_s16.val[1], output_zero_point_s16);
    output_val_s16.val[2] =
        vaddq_s16(output_val_s16.val[2], output_zero_point_s16);
    output_val_s16.val[3] =
        vaddq_s16(output_val_s16.val[3], output_zero_point_s16);

    // Cast output values to uint8, saturating
    uint8x16_t output_val_u8_0_1 = vcombine_u8(
        vqmovun_s16(output_val_s16.val[0]), vqmovun_s16(output_val_s16.val[1]));
    uint8x16_t output_val_u8_2_3 = vcombine_u8(
        vqmovun_s16(output_val_s16.val[2]), vqmovun_s16(output_val_s16.val[3]));

    ClampWithRangeAndStore(output_data + c, output_val_u8_0_1, masks_clamp_0_1);
    ClampWithRangeAndStore(output_data + c + 16, output_val_u8_2_3,
                           masks_clamp_2_3);
  }
#endif  // GEMMLOWP_NEON
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const uint8 input_val_u8 = input_data[c];
    const int16 input_val_centered =
        static_cast<int16>(input_val_u8) - input_zero_point;
    uint8 output_val;
    if (input_val_centered < -input_range_radius) {
      output_val = 0;
    } else if (input_val_centered > input_range_radius) {
      output_val = 255;
    } else {
      using gemmlowp::SaturatingRoundingDoublingHighMul;
      const int16 input_val_rescaled = SaturatingRoundingDoublingHighMul(
          static_cast<int16>(input_val_centered * (1 << input_left_shift)),
          static_cast<int16>(input_multiplier));
      using FixedPoint4 = gemmlowp::FixedPoint<int16, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int16, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Rescale the Q0.15 tanh result to the uint8 output grid, then re-add
      // the output zero point of 128.
      int16 output_val_s16 = RoundingDivideByPOT(output_val_f0.raw(), 8);
      output_val_s16 += output_zero_point;
      // tanh(+saturation) can round up to 256; pin it to the uint8 maximum.
      if (output_val_s16 == 256) {
        output_val_s16 = 255;
      }
      TFLITE_DCHECK_GE(output_val_s16, 0);
      TFLITE_DCHECK_LE(output_val_s16, 255);
      output_val = static_cast<uint8>(output_val_s16);
    }
    output_data[c] = output_val;
  }
}
// Computes tanh on int8-quantized data using 16-bit fixed-point arithmetic.
// The NEON path processes 32 values per iteration; a scalar loop handles the
// tail. No output zero point offset is needed for the signed variant.
inline void Tanh16bitPrecision(const TanhParams& params,
                               const RuntimeShape& input_shape,
                               const int8* input_data,
                               const RuntimeShape& output_shape,
                               int8* output_data) {
  // Note that this is almost the exact same code as in Logistic().
  ruy::profiler::ScopeLabel label("Tanh/Int8");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int16 input_multiplier = static_cast<int16>(params.input_multiplier);
  const int16 input_left_shift = static_cast<int16>(params.input_left_shift);
  const int size = MatchingFlatSize(input_shape, output_shape);

  int c = 0;
// TODO(b/139252020): Replace GEMMLOWP_NEON with USE_NEON when the bug is fixed.
// The converted versions of gemmlowp::tanh and gemmlowp::logistic, done by
// arm_sse_2_neon.h, produce incorrect results with int16x8_t data types.
#ifdef GEMMLOWP_NEON
  const int16x8_t range_radius_dup = vdupq_n_s16(input_range_radius);
  const int16x8_t neg_range_radius_dup = vdupq_n_s16(-input_range_radius);

  // Handle 32 values at a time
  for (; c <= size - 32; c += 32) {
    // Read input int8 values, cast to int16 and subtract input_zero_point
    using cpu_backend_gemm::detail::Load16AndSubtractZeroPoint;
    const int16x8x2_t input_val_centered_0_1 =
        Load16AndSubtractZeroPoint(input_data + c, input_zero_point);
    const int16x8x2_t input_val_centered_2_3 =
        Load16AndSubtractZeroPoint(input_data + c + 16, input_zero_point);

    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = -128;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 127;
    //   } else {
    //     ...
    uint8x16x2_t masks_clamp_0_1 = CalculateSignedClampingWithRangeBitMasks(
        input_val_centered_0_1, range_radius_dup, neg_range_radius_dup);
    uint8x16x2_t masks_clamp_2_3 = CalculateSignedClampingWithRangeBitMasks(
        input_val_centered_2_3, range_radius_dup, neg_range_radius_dup);

    int16x8x4_t input_val_rescaled = SaturatingRounding(
        input_val_centered_0_1.val[0], input_val_centered_0_1.val[1],
        input_val_centered_2_3.val[0], input_val_centered_2_3.val[1],
        input_left_shift, input_multiplier);

    int16x8x4_t output_val_s16 = FixedPoint4Tanh(input_val_rescaled);

    // Cast output values to int8, saturating
    int8x16_t output_val_s8_0_1 = vcombine_s8(
        vqmovn_s16(output_val_s16.val[0]), vqmovn_s16(output_val_s16.val[1]));
    int8x16_t output_val_s8_2_3 = vcombine_s8(
        vqmovn_s16(output_val_s16.val[2]), vqmovn_s16(output_val_s16.val[3]));

    ClampWithRangeAndStore(output_data + c, output_val_s8_0_1, masks_clamp_0_1);
    ClampWithRangeAndStore(output_data + c + 16, output_val_s8_2_3,
                           masks_clamp_2_3);
  }
#endif  // GEMMLOWP_NEON
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const int8 input_val_s8 = input_data[c];
    const int16 input_val_centered =
        static_cast<int16>(input_val_s8) - input_zero_point;
    int8 output_val;
    // NOTE: the scalar path clamps with <= / >= while the NEON masks use
    // strict comparisons; boundary values agree because the saturating
    // fixed-point pipeline produces the clamp value at the radius.
    if (input_val_centered <= -input_range_radius) {
      output_val = -128;
    } else if (input_val_centered >= input_range_radius) {
      output_val = 127;
    } else {
      using gemmlowp::SaturatingRoundingDoublingHighMul;
      const int16 input_val_rescaled = SaturatingRoundingDoublingHighMul(
          static_cast<int16>(input_val_centered * (1 << input_left_shift)),
          static_cast<int16>(input_multiplier));
      using FixedPoint4 = gemmlowp::FixedPoint<int16, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int16, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::tanh(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Rescale the Q0.15 tanh result to the int8 output grid.
      int16 output_val_s16 = RoundingDivideByPOT(output_val_f0.raw(), 8);
      // tanh(+saturation) can round up to 128; pin it to the int8 maximum.
      if (output_val_s16 == 128) {
        output_val_s16 = 127;
      }
      TFLITE_DCHECK_GE(output_val_s16, -128);
      TFLITE_DCHECK_LE(output_val_s16, 127);
      output_val = static_cast<int8>(output_val_s16);
    }
    output_data[c] = output_val;
  }
}
// Computes the logistic (sigmoid) function on uint8-quantized data using
// 16-bit fixed-point arithmetic. The NEON path processes 32 values per
// iteration; a scalar loop handles the tail.
inline void Logistic16bitPrecision(const LogisticParams& params,
                                   const RuntimeShape& input_shape,
                                   const uint8* input_data,
                                   const RuntimeShape& output_shape,
                                   uint8* output_data) {
  ruy::profiler::ScopeLabel label("Logistic/Uint8");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int32 input_multiplier = params.input_multiplier;
  const int16 input_left_shift = static_cast<int16>(params.input_left_shift);
  const int size = MatchingFlatSize(input_shape, output_shape);

  int c = 0;
// TODO(b/139252020): Replace GEMMLOWP_NEON with USE_NEON when the bug is fixed.
// The converted versions of gemmlowp::tanh and gemmlowp::logistic, done by
// arm_sse_2_neon.h, produce incorrect results with int16x8_t data types.
#ifdef GEMMLOWP_NEON
  const int16x8_t range_radius_dup = vdupq_n_s16(input_range_radius);
  const int16x8_t neg_range_radius_dup = vdupq_n_s16(-input_range_radius);

  // Handle 32 values at a time
  for (; c <= size - 32; c += 32) {
    // Read input uint8 values, cast to int16 and subtract input_zero_point
    using cpu_backend_gemm::detail::Load16AndSubtractZeroPoint;
    const int16x8x2_t input_val_centered_0_1 =
        Load16AndSubtractZeroPoint(input_data + c, input_zero_point);
    const int16x8x2_t input_val_centered_2_3 =
        Load16AndSubtractZeroPoint(input_data + c + 16, input_zero_point);

    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = 0;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 255;
    //   } else {
    //     ...
    uint8x16x2_t masks_clamp_0_1 = CalculateUnsignedClampingWithRangeBitMasks(
        input_val_centered_0_1, range_radius_dup, neg_range_radius_dup);
    uint8x16x2_t masks_clamp_2_3 = CalculateUnsignedClampingWithRangeBitMasks(
        input_val_centered_2_3, range_radius_dup, neg_range_radius_dup);

    int16x8x4_t input_val_rescaled = SaturatingRounding(
        input_val_centered_0_1.val[0], input_val_centered_0_1.val[1],
        input_val_centered_2_3.val[0], input_val_centered_2_3.val[1],
        input_left_shift, input_multiplier);

    int16x8x4_t output_val_s16 = FixedPoint4Logistic(input_val_rescaled);

    // Cast output values to uint8, saturating. No zero-point offset is needed
    // here: the logistic output is already non-negative.
    uint8x16_t output_val_u8_0_1 = vcombine_u8(
        vqmovun_s16(output_val_s16.val[0]), vqmovun_s16(output_val_s16.val[1]));
    uint8x16_t output_val_u8_2_3 = vcombine_u8(
        vqmovun_s16(output_val_s16.val[2]), vqmovun_s16(output_val_s16.val[3]));

    ClampWithRangeAndStore(output_data + c, output_val_u8_0_1, masks_clamp_0_1);
    ClampWithRangeAndStore(output_data + c + 16, output_val_u8_2_3,
                           masks_clamp_2_3);
  }
#endif  // GEMMLOWP_NEON
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const uint8 input_val_u8 = input_data[c];
    const int16 input_val_centered =
        static_cast<int16>(input_val_u8) - input_zero_point;
    uint8 output_val;
    if (input_val_centered < -input_range_radius) {
      output_val = 0;
    } else if (input_val_centered > input_range_radius) {
      output_val = 255;
    } else {
      using gemmlowp::SaturatingRoundingDoublingHighMul;
      const int16 input_val_rescaled = SaturatingRoundingDoublingHighMul(
          static_cast<int16>(input_val_centered * (1 << input_left_shift)),
          static_cast<int16>(input_multiplier));
      using FixedPoint4 = gemmlowp::FixedPoint<int16, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int16, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::logistic(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Rescale the Q0.15 logistic result to the uint8 output grid.
      int16 output_val_s16 = RoundingDivideByPOT(output_val_f0.raw(), 7);
      // logistic(+saturation) can round up to 256; pin to the uint8 maximum.
      if (output_val_s16 == 256) {
        output_val_s16 = 255;
      }
      TFLITE_DCHECK_GE(output_val_s16, 0);
      TFLITE_DCHECK_LE(output_val_s16, 255);
      output_val = static_cast<uint8>(output_val_s16);
    }
    output_data[c] = output_val;
  }
}
// Computes the logistic (sigmoid) function on int8-quantized data using
// 16-bit fixed-point arithmetic. The output zero point is fixed at 128 and is
// subtracted to map the non-negative sigmoid range onto int8. The NEON path
// processes 32 values per iteration; a scalar loop handles the tail.
inline void Logistic16bitPrecision(const LogisticParams& params,
                                   const RuntimeShape& input_shape,
                                   const int8* input_data,
                                   const RuntimeShape& output_shape,
                                   int8* output_data) {
  ruy::profiler::ScopeLabel label("Logistic/Int8");
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int32 input_multiplier = params.input_multiplier;
  const int16 input_left_shift = static_cast<int16>(params.input_left_shift);
  const int size = MatchingFlatSize(input_shape, output_shape);

  int c = 0;
  const int16 output_zero_point = 128;
// TODO(b/139252020): Replace GEMMLOWP_NEON with USE_NEON when the bug is fixed.
// The converted versions of gemmlowp::tanh and gemmlowp::logistic, done by
// arm_sse_2_neon.h, produce incorrect results with int16x8_t data types.
#ifdef GEMMLOWP_NEON
  const int16x8_t range_radius_dup = vdupq_n_s16(input_range_radius);
  const int16x8_t neg_range_radius_dup = vdupq_n_s16(-input_range_radius);
  const int16x8_t output_zero_point_dup = vdupq_n_s16(output_zero_point);

  // Handle 32 values at a time
  for (; c <= size - 32; c += 32) {
    // Read input int8 values, cast to int16 and subtract input_zero_point
    using cpu_backend_gemm::detail::Load16AndSubtractZeroPoint;
    const int16x8x2_t input_val_centered_0_1 =
        Load16AndSubtractZeroPoint(input_data + c, input_zero_point);
    const int16x8x2_t input_val_centered_2_3 =
        Load16AndSubtractZeroPoint(input_data + c + 16, input_zero_point);

    // Prepare the bit masks that we will use at the end to implement the logic
    // that was expressed in the scalar code with branching:
    //   if (input_val_centered < -input_range_radius) {
    //     output_val = -128;
    //   } else if (input_val_centered > input_range_radius) {
    //     output_val = 127;
    //   } else {
    //     ...
    uint8x16x2_t masks_clamp_0_1 = CalculateSignedClampingWithRangeBitMasks(
        input_val_centered_0_1, range_radius_dup, neg_range_radius_dup);
    uint8x16x2_t masks_clamp_2_3 = CalculateSignedClampingWithRangeBitMasks(
        input_val_centered_2_3, range_radius_dup, neg_range_radius_dup);

    int16x8x4_t input_val_rescaled = SaturatingRounding(
        input_val_centered_0_1.val[0], input_val_centered_0_1.val[1],
        input_val_centered_2_3.val[0], input_val_centered_2_3.val[1],
        input_left_shift, input_multiplier);

    int16x8x4_t output_val_s16 = FixedPoint4Logistic(input_val_rescaled);

    // Substract output zero point.
    output_val_s16.val[0] =
        vsubq_s16(output_val_s16.val[0], output_zero_point_dup);
    output_val_s16.val[1] =
        vsubq_s16(output_val_s16.val[1], output_zero_point_dup);
    output_val_s16.val[2] =
        vsubq_s16(output_val_s16.val[2], output_zero_point_dup);
    output_val_s16.val[3] =
        vsubq_s16(output_val_s16.val[3], output_zero_point_dup);

    // Cast output values to int8, saturating
    int8x16_t output_val_s8_0_1 = vcombine_s8(
        vqmovn_s16(output_val_s16.val[0]), vqmovn_s16(output_val_s16.val[1]));
    int8x16_t output_val_s8_2_3 = vcombine_s8(
        vqmovn_s16(output_val_s16.val[2]), vqmovn_s16(output_val_s16.val[3]));

    ClampWithRangeAndStore(output_data + c, output_val_s8_0_1, masks_clamp_0_1);
    ClampWithRangeAndStore(output_data + c + 16, output_val_s8_2_3,
                           masks_clamp_2_3);
  }
#endif  // GEMMLOWP_NEON
  // Leftover loop: handle one value at a time with scalar code.
  for (; c < size; ++c) {
    const int8 input_val_s8 = input_data[c];
    const int16 input_val_centered =
        static_cast<int16>(input_val_s8) - input_zero_point;
    int8 output_val;
    if (input_val_centered < -input_range_radius) {
      output_val = -128;
    } else if (input_val_centered > input_range_radius) {
      output_val = 127;
    } else {
      using gemmlowp::SaturatingRoundingDoublingHighMul;
      const int16 input_val_rescaled = SaturatingRoundingDoublingHighMul(
          static_cast<int16>(input_val_centered * (1 << input_left_shift)),
          static_cast<int16>(input_multiplier));
      using FixedPoint4 = gemmlowp::FixedPoint<int16, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int16, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::logistic(input_val_f4);
      using gemmlowp::RoundingDivideByPOT;
      // Rescale the Q0.15 logistic result, then shift by the output zero
      // point to map [0, 256] onto the signed range.
      int16 output_val_s16 = RoundingDivideByPOT(output_val_f0.raw(), 7);
      output_val_s16 -= output_zero_point;
      // logistic(+saturation) can round up past the int8 maximum; pin it.
      if (output_val_s16 == 128) {
        output_val_s16 = 127;
      }
      TFLITE_DCHECK_GE(output_val_s16, -128);
      TFLITE_DCHECK_LE(output_val_s16, 127);
      output_val = static_cast<int8>(output_val_s16);
    }
    output_data[c] = output_val;
  }
}
// Transpose2D only deals with typical 2D matrix transpose ops.
// Perform transpose by transposing 4x4 blocks of the input, proceeding from
// left to right (down the rows) of the input, and then from top to bottom.
// The input is read row-major (d0 rows of d1 elements); the output is written
// as d1 rows of d0 elements.
template <typename T>
inline void Transpose2D(const RuntimeShape& input_shape, const T* input_data,
                        const RuntimeShape& output_shape, T* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);

  const int d0 = input_shape.DimsData()[0];
  const int d1 = input_shape.DimsData()[1];
  const int kLines = 4;
  // Distance from the end of one block-row of input to the start of the next
  // group of kLines input rows.
  const int kSkipSize = (kLines - 1) * d1;

  const T* input = input_data;

  int i = 0;
  // Process kLines input rows at a time.
  for (; i <= d0 - kLines; i += kLines) {
    T* output = output_data + i;

    const T* input_ptr = input;
    // Prefetch the four input rows we are about to read.
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);

    int j = 0;
    // Transpose one 4x4 tile per iteration: load a[r][c] from four input rows,
    // then store the transposed columns into four output rows.
    for (; j <= d1 - kLines; j += kLines) {
      input_ptr = input;
      const T a00 = input_ptr[0];
      const T a01 = input_ptr[1];
      const T a02 = input_ptr[2];
      const T a03 = input_ptr[3];
      input_ptr += d1;
      const T a10 = input_ptr[0];
      const T a11 = input_ptr[1];
      const T a12 = input_ptr[2];
      const T a13 = input_ptr[3];
      input_ptr += d1;
      const T a20 = input_ptr[0];
      const T a21 = input_ptr[1];
      const T a22 = input_ptr[2];
      const T a23 = input_ptr[3];
      input_ptr += d1;
      const T a30 = input_ptr[0];
      const T a31 = input_ptr[1];
      const T a32 = input_ptr[2];
      const T a33 = input_ptr[3];

      output[0] = a00;
      output[1] = a10;
      output[2] = a20;
      output[3] = a30;
      output += d0;

      output[0] = a01;
      output[1] = a11;
      output[2] = a21;
      output[3] = a31;
      output += d0;

      output[0] = a02;
      output[1] = a12;
      output[2] = a22;
      output[3] = a32;
      output += d0;

      output[0] = a03;
      output[1] = a13;
      output[2] = a23;
      output[3] = a33;
      output += d0;

      input += kLines;
    }
    if (j == d1) {
      // Whole row group consumed; advance input past the remaining
      // (kLines - 1) rows of the group.
      input += kSkipSize;
    } else {
      // Leftover columns (< kLines): transpose them element-wise, then
      // advance input to the next group of rows.
      for (int p = 0; p < kLines; ++p) {
        for (int q = 0; q < d1 - j; ++q) {
          *(output + q * d0 + p) = *(input + p * d1 + q);
        }
      }
      input += (d1 - j) + kSkipSize;
    }
  }
  // Leftover rows (< kLines): transpose one element at a time.
  for (; i < d0; ++i) {
    T* output = output_data + i;
    for (int j = 0; j < d1; ++j) {
      *output = *input;
      output += d0;
      ++input;
    }
  }
}
// int32 specialization of Transpose2D: transposes 4x4 tiles with NEON
// unzip/transpose intrinsics (vuzpq/vtrnq) when available; otherwise falls
// through to the element-wise loop.
template <>
inline void Transpose2D(const RuntimeShape& input_shape,
                        const int32_t* input_data,
                        const RuntimeShape& output_shape,
                        int32_t* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);

  const int d0 = input_shape.DimsData()[0];
  const int d1 = input_shape.DimsData()[1];
#ifdef USE_NEON
  const int kLines = 4;
  // Distance from the end of one block-row of input to the start of the next
  // group of kLines input rows.
  const int kSkipSize = (kLines - 1) * d1;
#endif

  const int32_t* input = input_data;

  int i = 0;
#ifdef USE_NEON
  // Process kLines input rows at a time.
  for (; i <= d0 - kLines; i += kLines) {
    int32_t* output = output_data + i;

    const int32_t* input_ptr = input;
    // Prefetch the four input rows we are about to read.
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);
    input_ptr += d1;
    optimized_ops_preload_l1_keep(input_ptr);

    int j = 0;
    for (; j <= d1 - kLines; j += kLines) {
      // Load one 4x4 tile, one row per vector. (input_ptr == input here.)
      input_ptr = input;
      int32x4_t a0 = vld1q_s32(input);
      input_ptr += d1;
      int32x4_t a1 = vld1q_s32(input_ptr);
      input_ptr += d1;
      int32x4_t a2 = vld1q_s32(input_ptr);
      input_ptr += d1;
      int32x4_t a3 = vld1q_s32(input_ptr);

      // 4x4 in-register transpose: unzip rows, then transpose 2x2 pairs.
      int32x4x2_t tmp1 = vuzpq_s32(a0, a2);
      int32x4x2_t tmp2 = vuzpq_s32(a1, a3);
      int32x4x2_t tmp3 = vtrnq_s32(tmp1.val[0], tmp2.val[0]);
      int32x4x2_t tmp4 = vtrnq_s32(tmp1.val[1], tmp2.val[1]);

      vst1q_s32(output, tmp3.val[0]);
      output += d0;
      vst1q_s32(output, tmp4.val[0]);
      output += d0;
      vst1q_s32(output, tmp3.val[1]);
      output += d0;
      vst1q_s32(output, tmp4.val[1]);
      output += d0;
      input += kLines;
    }
    if (j == d1) {
      // Whole row group consumed; skip the remaining rows of the group.
      input += kSkipSize;
    } else {
      // Leftover columns (< kLines): transpose them element-wise.
      for (int p = 0; p < kLines; ++p) {
        for (int q = 0; q < d1 - j; ++q) {
          *(output + q * d0 + p) = *(input + p * d1 + q);
        }
      }
      input += (d1 - j) + kSkipSize;
    }
  }
#endif
  // Leftover rows (or the whole tensor without NEON): element-wise transpose.
  for (; i < d0; ++i) {
    int32_t* output = output_data + i;
    for (int j = 0; j < d1; ++j) {
      *output = *input;
      output += d0;
      ++input;
    }
  }
}
// TODO(b/173718660): see if we can reduce the number
// of lines of code in branching without affecting latency.
// Transposes a rank-3 tensor according to params.perm.
// Strategy: compute, for each output dimension, the stride (p1, p2, p3) into
// the row-major input that corresponds to advancing that output index by one,
// then copy element by element.
template <typename T>
inline void Transpose3D(const TransposeParams& params,
                        const RuntimeShape& input_shape, const T* input_data,
                        const RuntimeShape& output_shape, T* output_data) {
  int s1, s2, s3;
  s1 = input_shape.Dims(0);
  s2 = input_shape.Dims(1);
  s3 = input_shape.Dims(2);

  // Fix: initialize the strides. Previously p1/p2/p3 were only assigned
  // inside the conditional chains below, which triggers maybe-uninitialized
  // compiler warnings and would read indeterminate values (UB) if perm were
  // ever not a valid permutation of {0, 1, 2}. For a valid permutation each
  // stride is assigned exactly once, so behavior is unchanged.
  int p1 = 0, p2 = 0, p3 = 0;
  // The output dimension that came from input dim 2 advances by 1 element.
  if (params.perm[0] == 2) {
    p1 = 1;
  } else if (params.perm[1] == 2) {
    p2 = 1;
  } else {
    p3 = 1;
  }
  // The output dimension that came from input dim 1 advances by s3 elements.
  if (params.perm[0] == 1) {
    p1 = s3;
  } else if (params.perm[1] == 1) {
    p2 = s3;
  } else {
    p3 = s3;
  }
  // The output dimension that came from input dim 0 advances by s2*s3.
  if (params.perm[0] == 0) {
    p1 = s2 * s3;
  } else if (params.perm[1] == 0) {
    p2 = s2 * s3;
  } else {
    p3 = s2 * s3;
  }

  // Output shape is the input shape permuted by perm.
  int o_s[3];
  o_s[0] = input_shape.Dims(params.perm[0]);
  o_s[1] = input_shape.Dims(params.perm[1]);
  o_s[2] = input_shape.Dims(params.perm[2]);

  // Walk the output in row-major order, gathering from the strided input.
  for (int i1 = 0; i1 < o_s[0]; ++i1) {
    for (int i2 = 0; i2 < o_s[1]; ++i2) {
      for (int i3 = 0; i3 < o_s[2]; ++i3) {
        const int i = i1 * p1 + i2 * p2 + i3 * p3;
        const int o = i1 * o_s[1] * o_s[2] + i2 * o_s[2] + i3;
        output_data[o] = input_data[i];
      }
    }
  }
}
// Dispatches a transpose to the most specialized kernel that applies:
// a plain 2D transpose, the rank-3 kernel, or the generic reference op.
template <typename T, int N>
void TransposeImpl(const TransposeParams& params,
                   const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& output_shape, T* output_data) {
  // Fastest path: the permutation reduces to a 2D matrix transpose.
  int rows = 0;
  int cols = 0;
  if (transpose_utils::IsTranspose2DApplicable(params, input_shape, &rows,
                                               &cols)) {
    Transpose2D(RuntimeShape({rows, cols}), input_data,
                RuntimeShape({cols, rows}), output_data);
    return;
  }

  // TODO(b/141217325): notably Eigen is better suited for
  // larger inputs whereas Transpose3D is generally
  // better for smaller ones.
  //
  // E.g. on Nexus 5, Eigen is better for size 96^3 and up
  // and Transpose3D is better for 72^3 and down.
  //
  // 96^3 is not mobile-friendly for certain usecases
  // (e.g. model used in beam search for seq2seq) but is in others.
  // Consider tradeoffs.
  if (input_shape.DimensionsCount() == 3) {
    Transpose3D(params, input_shape, input_data, output_shape, output_data);
    return;
  }

  // Reroute to the reference version if an optimized method for the given
  // data is not available.
  reference_ops::Transpose<T, N>(params, input_shape, input_data, output_shape,
                                 output_data);
}
// Top-level transpose entry point. Shrinks away size-1 dimensions, handles
// identity permutations with a flat copy, flattens any leading dimensions
// the permutation keeps in place, then dispatches to TransposeImpl.
template <typename T, int N = 5>
void Transpose(const TransposeParams& unshrinked_params,
               const RuntimeShape& unshrinked_input_shape, const T* input_data,
               const RuntimeShape& unshrinked_output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Transpose");
  const int output_size = unshrinked_output_shape.DimensionsCount();
  TFLITE_DCHECK_LE(unshrinked_input_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(output_size, N);
  TFLITE_DCHECK_EQ(output_size, unshrinked_params.perm_count);
  // Mutable working copies: the shrink pass below rewrites the shapes and
  // the permutation in place.
  RuntimeShape shrinked_input_shape = RuntimeShape(unshrinked_input_shape);
  RuntimeShape shrinked_output_shape = RuntimeShape(unshrinked_output_shape);
  TransposeParams shrinked_params = unshrinked_params;
  // Reduce any dimensions that have one size. Lower transpose op usually
  // performs better since memory access patterns will be improved.
  transpose_utils::RemoveOneSizeDimensions(
      &shrinked_input_shape, &shrinked_output_shape, &shrinked_params);
  // Handle identity cases.
  // TODO(b/140779653): Add an optimization pass in the conversion process to
  // remove transpose op nodes where they do nothing like the below one.
  bool identical = true;
  for (int i = 0; i < shrinked_params.perm_count; ++i) {
    if (shrinked_params.perm[i] != i) {
      identical = false;
      break;
    }
  }
  // An identity permutation is a plain memcpy of the whole buffer.
  if (identical) {
    memcpy(output_data, input_data,
           unshrinked_input_shape.FlatSize() * sizeof(T));
    return;
  }
  // Reduce dimensions by flattening. When the leading dimension stays in
  // place, transpose each contiguous slice of `non_flatten_size` elements
  // independently.
  if (shrinked_params.perm[0] == 0 && output_size >= 3) {
    RuntimeShape non_flatten_input_shape;
    RuntimeShape non_flatten_output_shape;
    TransposeParams non_flatten_params;
    const int total_size = shrinked_input_shape.FlatSize();
    const int non_flatten_size = transpose_utils::Flatten(
        shrinked_input_shape, shrinked_output_shape, shrinked_params,
        &non_flatten_input_shape, &non_flatten_output_shape,
        &non_flatten_params);
    TFLITE_DCHECK_NE(non_flatten_params.perm[0], 0);
    for (int i = 0; i < total_size; i += non_flatten_size) {
      TransposeImpl<T, N>(non_flatten_params, non_flatten_input_shape,
                          input_data + i, non_flatten_output_shape,
                          output_data + i);
    }
    return;
  }
  // Call non-flattened case.
  TransposeImpl<T, N>(shrinked_params, shrinked_input_shape, input_data,
                      shrinked_output_shape, output_data);
}
// Element-wise int8 maximum. Assumes input1 & input2 share the same scale
// and zero point, so the comparison can be done directly on raw values.
// `params` is unused; the signature matches BinaryBroadcastFiveFold's
// element-wise kernel contract.
inline void MaximumElementwise(int size, const ArithmeticParams& params,
                               const int8* input1_data, const int8* input2_data,
                               int8* output_data) {
  ruy::profiler::ScopeLabel label("MaximumElementwiseInt8/8bit");
  int idx = 0;
#ifdef USE_NEON
  // Vector loop: 16 lanes per iteration.
  for (; idx <= size - 16; idx += 16) {
    const int8x16_t lhs = vld1q_s8(input1_data + idx);
    const int8x16_t rhs = vld1q_s8(input2_data + idx);
    vst1q_s8(output_data + idx, vmaxq_s8(lhs, rhs));
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  while (idx < size) {
    output_data[idx] = std::max(input1_data[idx], input2_data[idx]);
    ++idx;
  }
}
// Broadcast int8 maximum of a scalar against a vector. Assumes both operands
// share the same quantization scale and zero point. `params` is unused; the
// signature matches BinaryBroadcastFiveFold's scalar-broadcast kernel
// contract.
inline void MaximumScalarBroadcast(int size, const ArithmeticParams& params,
                                   int8 input1_data, const int8* input2_data,
                                   int8* output_data) {
  ruy::profiler::ScopeLabel label("MaximumScalarBroadcastInt8/8bit");
  int idx = 0;
#ifdef USE_NEON
  // Splat the scalar across all 16 lanes once, outside the loop.
  const int8x16_t scalar_dup = vdupq_n_s8(input1_data);
  for (; idx <= size - 16; idx += 16) {
    const int8x16_t rhs = vld1q_s8(input2_data + idx);
    vst1q_s8(output_data + idx, vmaxq_s8(scalar_dup, rhs));
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  while (idx < size) {
    output_data[idx] = std::max(input1_data, input2_data[idx]);
    ++idx;
  }
}
// Element-wise int8 minimum. Assumes input1 & input2 share the same scale
// and zero point, so the comparison can be done directly on raw values.
// `params` is unused; the signature matches BinaryBroadcastFiveFold's
// element-wise kernel contract.
inline void MinimumElementwise(int size, const ArithmeticParams& params,
                               const int8* input1_data, const int8* input2_data,
                               int8* output_data) {
  ruy::profiler::ScopeLabel label("MinimumElementwiseInt8/8bit");
  int idx = 0;
#ifdef USE_NEON
  // Vector loop: 16 lanes per iteration.
  for (; idx <= size - 16; idx += 16) {
    const int8x16_t lhs = vld1q_s8(input1_data + idx);
    const int8x16_t rhs = vld1q_s8(input2_data + idx);
    vst1q_s8(output_data + idx, vminq_s8(lhs, rhs));
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  while (idx < size) {
    output_data[idx] = std::min(input1_data[idx], input2_data[idx]);
    ++idx;
  }
}
// Broadcast int8 minimum of a scalar against a vector. Assumes both operands
// share the same quantization scale and zero point. `params` is unused; the
// signature matches BinaryBroadcastFiveFold's scalar-broadcast kernel
// contract.
inline void MinimumScalarBroadcast(int size, const ArithmeticParams& params,
                                   int8 input1_data, const int8* input2_data,
                                   int8* output_data) {
  ruy::profiler::ScopeLabel label("MinimumScalarBroadcastInt8/8bit");
  int idx = 0;
#ifdef USE_NEON
  // Splat the scalar across all 16 lanes once, outside the loop.
  const int8x16_t scalar_dup = vdupq_n_s8(input1_data);
  for (; idx <= size - 16; idx += 16) {
    const int8x16_t rhs = vld1q_s8(input2_data + idx);
    vst1q_s8(output_data + idx, vminq_s8(scalar_dup, rhs));
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  while (idx < size) {
    output_data[idx] = std::min(input1_data, input2_data[idx]);
    ++idx;
  }
}
// Routes a broadcast maximum to either the generic (slow) reference kernel
// or the vectorized fivefold fast path, based on the broadcast category
// precomputed in `params`.
template <typename Op>
inline void BroadcastMaximumDispatch(const ArithmeticParams& params,
                                     const RuntimeShape& input1_shape,
                                     const int8* input1_data,
                                     const RuntimeShape& input2_shape,
                                     const int8* input2_data,
                                     const RuntimeShape& output_shape,
                                     int8* output_data, Op op) {
  const bool needs_generic =
      params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast;
  if (needs_generic) {
    // `op` supplies the max comparison for the reference kernel.
    reference_ops::MaximumMinimumBroadcastSlow(
        input1_shape, input1_data, input2_shape, input2_data, output_shape,
        output_data, op);
    return;
  }
  // Fast path: fivefold-broadcastable shapes use the NEON-accelerated
  // maximum kernels.
  BinaryBroadcastFiveFold(params, input1_shape, input1_data, input2_shape,
                          input2_data, output_shape, output_data,
                          MaximumElementwise, MaximumScalarBroadcast);
}
// Routes a broadcast minimum to either the generic (slow) reference kernel
// or the vectorized fivefold fast path, based on the broadcast category
// precomputed in `params`.
template <typename Op>
inline void BroadcastMinimumDispatch(const ArithmeticParams& params,
                                     const RuntimeShape& input1_shape,
                                     const int8* input1_data,
                                     const RuntimeShape& input2_shape,
                                     const int8* input2_data,
                                     const RuntimeShape& output_shape,
                                     int8* output_data, Op op) {
  const bool needs_generic =
      params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast;
  if (needs_generic) {
    // The same reference kernel serves both min and max; `op` supplies the
    // comparison.
    reference_ops::MaximumMinimumBroadcastSlow(
        input1_shape, input1_data, input2_shape, input2_data, output_shape,
        output_data, op);
    return;
  }
  // Fast path: fivefold-broadcastable shapes use the NEON-accelerated
  // minimum kernels.
  BinaryBroadcastFiveFold(params, input1_shape, input1_data, input2_shape,
                          input2_data, output_shape, output_data,
                          MinimumElementwise, MinimumScalarBroadcast);
}
// Cumulative-sum implementation: collapses the input to a rank-3 view
// (outer, axis, inner) and runs Eigen's cumsum along the middle dimension.
template <typename T>
void CumsumImpl(const T* input_data, const RuntimeShape& shape, int axis,
                bool exclusive, bool reverse, T* output_data) {
  // dims[0]: product of dimensions before `axis`; dims[1]: the axis itself;
  // dims[2]: product of dimensions after `axis`.
  Eigen::array<Eigen::DenseIndex, 3> dims = {1, 1, 1};
  for (int i = 0; i < axis; ++i) {
    dims[0] *= shape.Dims(i);
  }
  dims[1] = shape.Dims(axis);
  for (int i = axis + 1; i < shape.DimensionsCount(); ++i) {
    dims[2] *= shape.Dims(i);
  }
  typedef Eigen::TensorMap<
      Eigen::Tensor<const T, 3, Eigen::RowMajor, Eigen::DenseIndex>,
      Eigen::Aligned>
      ConstTensor;
  typedef Eigen::TensorMap<
      Eigen::Tensor<T, 3, Eigen::RowMajor, Eigen::DenseIndex>, Eigen::Aligned>
      Tensor;
  // Zero-copy views over the caller's buffers.
  ConstTensor input(input_data, dims);
  Tensor output(output_data, dims);
  if (reverse) {
    // Reverse cumsum: flip along the axis, accumulate, flip back.
    Eigen::array<bool, 3> reverse_idx = {false, true, false};
    output =
        input.reverse(reverse_idx).cumsum(1, exclusive).reverse(reverse_idx);
  } else {
    // `exclusive` shifts each partial sum so an element excludes itself.
    output = input.cumsum(1, exclusive);
  }
}
template <typename T>
void CumSum(const T* input_data, const RuntimeShape& shape, int axis,
bool exclusive, bool reverse, T* output_data) {
const int dim = shape.DimensionsCount();
TFLITE_DCHECK_GE(dim, 1);
CumsumImpl<T>(input_data, shape, axis, exclusive, reverse, output_data);
}
// PRelu with a single broadcast alpha:
//   output[i] = input[i] >= 0 ? input[i] : input[i] * alpha.
// `params` is unused; the signature matches BinaryBroadcastFiveFold's
// scalar-broadcast kernel contract.
inline void PReluScalarBroadcast(int size, const ArithmeticParams& params,
                                 float alpha, const float* input_data,
                                 float* output_data) {
  ruy::profiler::ScopeLabel label("PreluScalarBroadcast/float");
  int i = 0;
#ifdef USE_NEON
  const float32x4_t zero_dup = vdupq_n_f32(0.0f);
  const float32x4_t alpha_dup = vdupq_n_f32(alpha);
  // Main vector loop: 16 floats (four q-registers) per iteration.
  for (; i <= size - 16; i += 16) {
    const float32x4_t input1 = vld1q_f32(input_data + i);
    const float32x4_t input2 = vld1q_f32(input_data + i + 4);
    const float32x4_t input3 = vld1q_f32(input_data + i + 8);
    const float32x4_t input4 = vld1q_f32(input_data + i + 12);
    const float32x4_t temp1 = vmulq_f32(input1, alpha_dup);
    const float32x4_t temp2 = vmulq_f32(input2, alpha_dup);
    const float32x4_t temp3 = vmulq_f32(input3, alpha_dup);
    const float32x4_t temp4 = vmulq_f32(input4, alpha_dup);
    const uint32x4_t mask1 = vcgeq_f32(input1, zero_dup);
    const uint32x4_t mask2 = vcgeq_f32(input2, zero_dup);
    const uint32x4_t mask3 = vcgeq_f32(input3, zero_dup);
    const uint32x4_t mask4 = vcgeq_f32(input4, zero_dup);
    // vbslq selects the raw input where input >= 0, alpha*input elsewhere.
    const float32x4_t result1 = vbslq_f32(mask1, input1, temp1);
    vst1q_f32(output_data + i, result1);
    const float32x4_t result2 = vbslq_f32(mask2, input2, temp2);
    vst1q_f32(output_data + i + 4, result2);
    const float32x4_t result3 = vbslq_f32(mask3, input3, temp3);
    vst1q_f32(output_data + i + 8, result3);
    const float32x4_t result4 = vbslq_f32(mask4, input4, temp4);
    vst1q_f32(output_data + i + 12, result4);
  }
  // Secondary vector loop: 4 floats per iteration.
  for (; i <= size - 4; i += 4) {
    const float32x4_t input = vld1q_f32(input_data + i);
    const float32x4_t temp = vmulq_f32(input, alpha_dup);
    const uint32x4_t mask = vcgeq_f32(input, zero_dup);
    const float32x4_t result = vbslq_f32(mask, input, temp);
    vst1q_f32(output_data + i, result);
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  for (; i < size; ++i) {
    const float input = input_data[i];
    output_data[i] = input >= 0.f ? input : input * alpha;
  }
}
// PRelu with a per-element alpha (alpha_data has the same flat size as the
// input): output[i] = input[i] >= 0 ? input[i] : input[i] * alpha[i].
// `params` is unused; the signature matches BinaryBroadcastFiveFold's
// element-wise kernel contract.
inline void PReluElementWise(int flat_size, const ArithmeticParams& params,
                             const float* alpha_data, const float* input_data,
                             float* output_data) {
  ruy::profiler::ScopeLabel label("PreluElementWise/float");
  int i = 0;
#ifdef USE_NEON
  const float32x4_t zero_dup = vdupq_n_f32(0.0f);
  // Main vector loop: 16 floats (four q-registers) per iteration.
  for (; i <= flat_size - 16; i += 16) {
    const float32x4_t input1 = vld1q_f32(input_data + i);
    const float32x4_t alpha1 = vld1q_f32(alpha_data + i);
    const float32x4_t input2 = vld1q_f32(input_data + i + 4);
    const float32x4_t alpha2 = vld1q_f32(alpha_data + i + 4);
    const float32x4_t input3 = vld1q_f32(input_data + i + 8);
    const float32x4_t alpha3 = vld1q_f32(alpha_data + i + 8);
    const float32x4_t input4 = vld1q_f32(input_data + i + 12);
    const float32x4_t alpha4 = vld1q_f32(alpha_data + i + 12);
    const float32x4_t temp1 = vmulq_f32(input1, alpha1);
    const float32x4_t temp2 = vmulq_f32(input2, alpha2);
    const float32x4_t temp3 = vmulq_f32(input3, alpha3);
    const float32x4_t temp4 = vmulq_f32(input4, alpha4);
    const uint32x4_t mask1 = vcgeq_f32(input1, zero_dup);
    const uint32x4_t mask2 = vcgeq_f32(input2, zero_dup);
    const uint32x4_t mask3 = vcgeq_f32(input3, zero_dup);
    const uint32x4_t mask4 = vcgeq_f32(input4, zero_dup);
    // vbslq selects the raw input where input >= 0, alpha*input elsewhere.
    const float32x4_t result1 = vbslq_f32(mask1, input1, temp1);
    vst1q_f32(output_data + i, result1);
    const float32x4_t result2 = vbslq_f32(mask2, input2, temp2);
    vst1q_f32(output_data + i + 4, result2);
    const float32x4_t result3 = vbslq_f32(mask3, input3, temp3);
    vst1q_f32(output_data + i + 8, result3);
    const float32x4_t result4 = vbslq_f32(mask4, input4, temp4);
    vst1q_f32(output_data + i + 12, result4);
  }
  // Secondary vector loop: 4 floats per iteration.
  for (; i <= flat_size - 4; i += 4) {
    const float32x4_t input = vld1q_f32(input_data + i);
    const float32x4_t alpha = vld1q_f32(alpha_data + i);
    const float32x4_t temp = vmulq_f32(input, alpha);
    const uint32x4_t mask = vcgeq_f32(input, zero_dup);
    const float32x4_t result = vbslq_f32(mask, input, temp);
    vst1q_f32(output_data + i, result);
  }
#endif  // USE_NEON
  // Scalar tail (and the whole range when NEON is unavailable).
  for (; i < flat_size; ++i) {
    const float input = input_data[i];
    const float alpha = alpha_data[i];
    output_data[i] = input >= 0.f ? input : input * alpha;
  }
}
// Routes a broadcast PRelu to either the generic (slow) reference kernel or
// the vectorized fivefold fast path, based on the broadcast category
// precomputed in `params`. `func` supplies the scalar PRelu op for the
// reference kernel.
inline void BroadcastPReluDispatch(
    const ArithmeticParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& alpha_shape,
    const float* alpha_data, const RuntimeShape& output_shape,
    float* output_data, float (*func)(float, float)) {
  const bool needs_generic =
      params.broadcast_category == BroadcastableOpCategory::kGenericBroadcast;
  if (needs_generic) {
    reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>(
        input_shape, input_data, alpha_shape, alpha_data, output_shape,
        output_data, func);
    return;
  }
  // Fast path: fivefold-broadcastable shapes use the vectorized PRelu
  // kernels.
  BinaryBroadcastFiveFold(params, input_shape, input_data, alpha_shape,
                          alpha_data, output_shape, output_data,
                          PReluElementWise, PReluScalarBroadcast);
}
// Returns the index of the smallest value in `input_data[0, size)`.
// Ties resolve to the smallest index. `size` must be >= 1.
template <typename T>
inline int ArgMinVector(const T* input_data, int size) {
  int best_index = 0;
  T best_value = input_data[0];
  for (int idx = 1; idx < size; ++idx) {
    // Strict '<' keeps the earliest occurrence on ties.
    if (input_data[idx] < best_value) {
      best_value = input_data[idx];
      best_index = idx;
    }
  }
  return best_index;
}
// Returns the index of the largest value in `input_data[0, size)`.
// Ties resolve to the smallest index. `size` must be >= 1.
template <typename T>
inline int ArgMaxVector(const T* input_data, int size) {
  int best_index = 0;
  T best_value = input_data[0];
  for (int idx = 1; idx < size; ++idx) {
    // Strict comparison keeps the earliest occurrence on ties.
    const T candidate = input_data[idx];
    if (best_value < candidate) {
      best_value = candidate;
      best_index = idx;
    }
  }
  return best_index;
}
// Float specialization of ArgMinVector. The NEON path tracks, per lane, the
// running minimum and the index at which it was seen, then reduces across
// lanes at the end; ties resolve to the smallest index.
template <>
inline int ArgMinVector(const float* input_data, int size) {
  int32_t min_index = 0;
  float min_value = input_data[0];
  int32_t i = 1;
#ifdef USE_NEON
  if (size >= 4) {
    float32x4_t min_value_f32x4 = vld1q_f32(input_data);
    const int32_t index_init[4] = {0, 1, 2, 3};
    int32x4_t min_index_s32x4 = vld1q_s32(index_init);
    int32x4_t index_s32x4 = min_index_s32x4;
    int32x4_t inc = vdupq_n_s32(4);
    for (i = 4; i <= size - 4; i += 4) {
      // Increase indices by 4.
      index_s32x4 = vaddq_s32(index_s32x4, inc);
      float32x4_t v = vld1q_f32(&input_data[i]);
      // Strict '<' keeps the earlier index on per-lane ties.
      uint32x4_t mask = vcltq_f32(v, min_value_f32x4);
      min_value_f32x4 = vminq_f32(min_value_f32x4, v);
      min_index_s32x4 = vbslq_s32(mask, index_s32x4, min_index_s32x4);
    }
    // Find min element within float32x4_t.
#ifdef __aarch64__
    min_value = vminvq_f32(min_value_f32x4);
#else
    float32x2_t min_value_f32x2 = vpmin_f32(vget_low_f32(min_value_f32x4),
                                            vget_high_f32(min_value_f32x4));
    min_value_f32x2 = vpmin_f32(min_value_f32x2, min_value_f32x2);
    min_value = vget_lane_f32(min_value_f32x2, 0);
#endif  // __aarch64__
    // Mask indices of non-min values with max int32_t.
    float32x4_t fill_min_value_f32x4 = vdupq_n_f32(min_value);
    uint32x4_t mask = vceqq_f32(min_value_f32x4, fill_min_value_f32x4);
    int32x4_t all_set = vdupq_n_s32(std::numeric_limits<int>::max());
    min_index_s32x4 = vbslq_s32(mask, min_index_s32x4, all_set);
    // Find min index of min values.
#ifdef __aarch64__
    min_index = vminvq_s32(min_index_s32x4);
#else
    int32x2_t min_index_s32x2 = vpmin_s32(vget_low_s32(min_index_s32x4),
                                          vget_high_s32(min_index_s32x4));
    min_index_s32x2 = vpmin_s32(min_index_s32x2, min_index_s32x2);
    min_index = vget_lane_s32(min_index_s32x2, 0);
#endif  // __aarch64__
  }
#endif  // USE_NEON
  // Leftover loop.
  for (; i < size; ++i) {
    const float curr_value = input_data[i];
    if (curr_value < min_value) {
      min_value = curr_value;
      min_index = i;
    }
  }
  return min_index;
}
// Float specialization of ArgMaxVector; mirrors ArgMinVector above with the
// comparison direction reversed. The final index reduction still takes the
// *minimum* candidate index, so ties resolve to the smallest index.
template <>
inline int ArgMaxVector(const float* input_data, int size) {
  int32_t max_index = 0;
  float max_value = input_data[0];
  int32_t i = 1;
#ifdef USE_NEON
  if (size >= 4) {
    float32x4_t max_value_f32x4 = vld1q_f32(input_data);
    const int32_t index_init[4] = {0, 1, 2, 3};
    int32x4_t max_index_s32x4 = vld1q_s32(index_init);
    int32x4_t index_s32x4 = max_index_s32x4;
    int32x4_t inc = vdupq_n_s32(4);
    for (i = 4; i <= size - 4; i += 4) {
      // Increase indices by 4.
      index_s32x4 = vaddq_s32(index_s32x4, inc);
      float32x4_t v = vld1q_f32(&input_data[i]);
      // Strict '>' keeps the earlier index on per-lane ties.
      uint32x4_t mask = vcgtq_f32(v, max_value_f32x4);
      max_value_f32x4 = vmaxq_f32(max_value_f32x4, v);
      max_index_s32x4 = vbslq_s32(mask, index_s32x4, max_index_s32x4);
    }
    // Find max element within float32x4_t.
#ifdef __aarch64__
    max_value = vmaxvq_f32(max_value_f32x4);
#else
    float32x2_t max_value_f32x2 = vpmax_f32(vget_low_f32(max_value_f32x4),
                                            vget_high_f32(max_value_f32x4));
    max_value_f32x2 = vpmax_f32(max_value_f32x2, max_value_f32x2);
    max_value = vget_lane_f32(max_value_f32x2, 0);
#endif  // __aarch64__
    // Mask indices of non-max values with max int32_t.
    float32x4_t fill_max_value_f32x4 = vdupq_n_f32(max_value);
    uint32x4_t mask = vceqq_f32(max_value_f32x4, fill_max_value_f32x4);
    int32x4_t all_set = vdupq_n_s32(std::numeric_limits<int>::max());
    max_index_s32x4 = vbslq_s32(mask, max_index_s32x4, all_set);
    // Find min index of max values.
#ifdef __aarch64__
    max_index = vminvq_s32(max_index_s32x4);
#else
    int32x2_t max_index_s32x2 = vpmin_s32(vget_low_s32(max_index_s32x4),
                                          vget_high_s32(max_index_s32x4));
    max_index_s32x2 = vpmin_s32(max_index_s32x2, max_index_s32x2);
    max_index = vget_lane_s32(max_index_s32x2, 0);
#endif  // __aarch64__
  }
#endif  // USE_NEON
  // Leftover loop.
  for (; i < size; ++i) {
    const float curr_value = input_data[i];
    if (curr_value > max_value) {
      max_value = curr_value;
      max_index = i;
    }
  }
  return max_index;
}
// int8 specialization of ArgMaxVector. The NEON path reduces each 16-byte
// block to its maximum and records the start offset of the first block that
// strictly improves the running maximum; a short scan afterwards locates the
// exact lane. Ties resolve to the smallest index.
template <>
inline int ArgMaxVector(const int8_t* input_data, int size) {
  int32_t max_index = 0;
  int8_t max_value = input_data[0];
  int32_t i = 0;
#ifdef USE_NEON
  constexpr int VECTOR_SIZE = 16;
  if (size >= VECTOR_SIZE) {
    int8x16_t max_value_s8x16;
    for (; i <= size - VECTOR_SIZE; i += VECTOR_SIZE) {
      max_value_s8x16 = vld1q_s8(input_data + i);
      int8_t max_from_vec;
#ifdef __aarch64__
      max_from_vec = vmaxvq_s8(max_value_s8x16);
#else  // 32 bit
      int8x8_t max_val_s8x8 =
          vpmax_s8(vget_low_s8(max_value_s8x16), vget_high_s8(max_value_s8x16));
      max_val_s8x8 = vpmax_s8(max_val_s8x8, max_val_s8x8);
      max_val_s8x8 = vpmax_s8(max_val_s8x8, max_val_s8x8);
      max_val_s8x8 = vpmax_s8(max_val_s8x8, max_val_s8x8);
      max_from_vec = vget_lane_s8(max_val_s8x8, 0);
#endif  // __aarch64__
      // Strict '>' keeps the earliest block on ties.
      if (max_from_vec > max_value) {
        max_value = max_from_vec;
        max_index = i;
      }
    }
  }
  // `max_index` currently points at the start of the winning 16-byte block;
  // find the first lane in that block holding the maximum. When the vector
  // loop did not run (size < 16), max_index is 0 and input_data[0] equals
  // max_value, so this exits on the first iteration.
  for (int start_idx = max_index; start_idx < max_index + VECTOR_SIZE;
       start_idx++) {
    if (input_data[start_idx] == max_value) {
      max_index = start_idx;
      break;
    }
  }
#endif  // USE_NEON
  // Leftover loop.
  for (; i < size; ++i) {
    const int8_t curr_value = input_data[i];
    if (curr_value > max_value) {
      max_value = curr_value;
      max_index = i;
    }
  }
  return max_index;
}
// uint8 specialization of ArgMaxVector; identical strategy to the int8
// specialization above, using the unsigned NEON intrinsics. Ties resolve to
// the smallest index.
template <>
inline int ArgMaxVector(const uint8_t* input_data, int size) {
  int32_t max_index = 0;
  uint8_t max_value = input_data[0];
  int32_t i = 0;
#ifdef USE_NEON
  constexpr int VECTOR_SIZE = 16;
  if (size >= VECTOR_SIZE) {
    uint8x16_t max_value_u8x16;
    for (; i <= size - VECTOR_SIZE; i += VECTOR_SIZE) {
      max_value_u8x16 = vld1q_u8(input_data + i);
      uint8_t max_from_vec;
#ifdef __aarch64__
      max_from_vec = vmaxvq_u8(max_value_u8x16);
#else  // 32 bit
      uint8x8_t max_val_u8x8 =
          vpmax_u8(vget_low_u8(max_value_u8x16), vget_high_u8(max_value_u8x16));
      max_val_u8x8 = vpmax_u8(max_val_u8x8, max_val_u8x8);
      max_val_u8x8 = vpmax_u8(max_val_u8x8, max_val_u8x8);
      max_val_u8x8 = vpmax_u8(max_val_u8x8, max_val_u8x8);
      max_from_vec = vget_lane_u8(max_val_u8x8, 0);
#endif  // __aarch64__
      // Strict '>' keeps the earliest block on ties.
      if (max_from_vec > max_value) {
        max_value = max_from_vec;
        max_index = i;
      }
    }
  }
  // `max_index` currently points at the start of the winning 16-byte block;
  // find the first lane in that block holding the maximum. When the vector
  // loop did not run (size < 16), max_index is 0 and input_data[0] equals
  // max_value, so this exits on the first iteration.
  for (int start_idx = max_index; start_idx < max_index + VECTOR_SIZE;
       start_idx++) {
    if (input_data[start_idx] == max_value) {
      max_index = start_idx;
      break;
    }
  }
#endif  // USE_NEON
  // Leftover loop.
  for (; i < size; ++i) {
    const uint8_t curr_value = input_data[i];
    if (curr_value > max_value) {
      max_value = curr_value;
      max_index = i;
    }
  }
  return max_index;
}
// Specializes ArgMinMax for axis = dims - 1, where the reduction runs over
// contiguous memory: the input is viewed as (outer_size, axis_size) rows and
// each row is reduced independently via ArgMinVector / ArgMaxVector.
template <typename T1, typename T2, bool is_arg_max>
inline void ArgMinMaxLastAxis(const RuntimeShape& input_shape,
                              const T1* input_data,
                              const RuntimeShape& output_shape,
                              T2* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_EQ(input_shape.Dims(0), output_shape.Dims(0));

  const int outer_size = input_shape.Dims(0);
  const int axis_size = input_shape.Dims(1);
  const T1* row = input_data;
  for (int outer = 0; outer < outer_size; ++outer, row += axis_size) {
    const int best = is_arg_max ? ArgMaxVector<T1>(row, axis_size)
                                : ArgMinVector<T1>(row, axis_size);
    output_data[outer] = static_cast<T2>(best);
  }
}
// Arg-min/arg-max of `input1_data` along the axis given by the scalar
// `input2_data[0]` (negative values count from the back). The output shape
// is the input shape with that axis removed.
template <typename T1, typename T2, typename T3>
inline void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
                      const T3* input2_data, const RuntimeShape& output_shape,
                      T2* output_data, const bool is_arg_max) {
  ruy::profiler::ScopeLabel label("ArgMinMax");
  TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0);
  TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1,
                   output_shape.DimensionsCount());
  int axis = input2_data[0];
  if (axis < 0) {
    axis += input1_shape.DimensionsCount();
  }
  const int axis_size = input1_shape.Dims(axis);
  // Product of dimensions before the axis; must match the output shape.
  int outer_size = 1;
  for (int i = 0; i < axis; ++i) {
    TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i));
    outer_size *= input1_shape.Dims(i);
  }
  // Product of dimensions after the axis; 1 means the axis is innermost.
  int inner_size = 1;
  const int dims_count = input1_shape.DimensionsCount();
  for (int i = axis + 1; i < dims_count; ++i) {
    TFLITE_DCHECK_EQ(input1_shape.Dims(i), output_shape.Dims(i - 1));
    inner_size *= input1_shape.Dims(i);
  }
  // Call the specialized function when axis = dims - 1, i.e. the reduction
  // runs over contiguous memory. Vectorized specializations of
  // ArgMinVector/ArgMaxVector exist for float (min and max) and for
  // int8/uint8 (max only); the remaining combinations in this branch use the
  // generic scalar loops, which are still correct.
  if (inner_size == 1 &&
      (std::is_same<T1, float>::value || std::is_same<T1, int8_t>::value ||
       std::is_same<T1, uint8_t>::value)) {
    if (is_arg_max) {
      ArgMinMaxLastAxis<T1, T2, /*is_arg_max=*/true>(
          {outer_size, axis_size}, input1_data, {outer_size}, output_data);
    } else {
      ArgMinMaxLastAxis<T1, T2, /*is_arg_max=*/false>(
          {outer_size, axis_size}, input1_data, {outer_size}, output_data);
    }
    return;
  }
  // Everything else falls back to the reference implementation.
  reference_ops::ArgMinMax(input1_shape, input1_data, input2_data, output_shape,
                           output_data, is_arg_max);
}
// Thin wrapper over ArgMinMax with the reduction fixed to "max".
template <typename T1, typename T2, typename T3>
void ArgMax(const RuntimeShape& input1_shape, const T1* input1_data,
            const T3* input2_data, const RuntimeShape& output_shape,
            T2* output_data) {
  constexpr bool kIsArgMax = true;
  ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data,
            kIsArgMax);
}
// Convenience version that allows, for example, generated-code calls to be
// the same as other binary ops.
// For backward compatibility, reference_ops has ArgMax function.
template <typename T1, typename T2, typename T3>
inline void ArgMax(const RuntimeShape& input1_shape, const T1* input1_data,
                   const RuntimeShape& input2_shape, const T3* input2_data,
                   const RuntimeShape& output_shape, T2* output_data) {
  // The shape of the axis tensor is irrelevant: the 5-argument overload only
  // reads the first element of `input2_data`.
  (void)input2_shape;
  ArgMax(input1_shape, input1_data, input2_data, output_shape, output_data);
}
// 3-D convolution on float data, implemented as (optional) im2col patch
// extraction followed by a single GEMM. `im2col_data` and
// `transposed_filter_data` are caller-provided scratch buffers with the
// given shapes; `im2col_data` must be non-null when strides/filter size
// require patch rearrangement (see `need_im2col` below) and null otherwise.
inline void Conv3D(const Conv3DParams& params, const RuntimeShape& input_shape,
                   const float* input_data, const RuntimeShape& filter_shape,
                   const float* filter_data, const RuntimeShape& bias_shape,
                   const float* bias_data, const RuntimeShape& output_shape,
                   float* output_data, const RuntimeShape& im2col_shape,
                   float* im2col_data,
                   const RuntimeShape& transposed_filter_shape,
                   float* transposed_filter_data,
                   CpuBackendContext* cpu_backend_context) {
  const int stride_depth = params.stride_depth;
  const int stride_height = params.stride_height;
  const int stride_width = params.stride_width;
  const int dilation_depth_factor = params.dilation_depth;
  const int dilation_height_factor = params.dilation_height;
  const int dilation_width_factor = params.dilation_width;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
  ruy::profiler::ScopeLabel label("Conv3D");
  // NB: the float 0.0f value is represented by all zero bytes.
  const uint8 float_zero_byte = 0x00;
  const float* gemm_input_data = nullptr;
  const RuntimeShape* gemm_input_shape = nullptr;
  // Filter dims 0/1/2 are the kernel's depth/height/width.
  const int filter_width = filter_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_depth = filter_shape.Dims(0);
  // Dilated im2col is required whenever any dilation factor differs from 1.
  const bool need_dilated_im2col = dilation_width_factor != 1 ||
                                   dilation_height_factor != 1 ||
                                   dilation_depth_factor != 1;
  // Plain im2col is required unless the filter is 1x1x1 with unit strides,
  // in which case the input can feed the GEMM directly.
  const bool need_im2col = stride_depth != 1 || stride_height != 1 ||
                           stride_width != 1 || filter_depth != 1 ||
                           filter_height != 1 || filter_width != 1;
  if (need_dilated_im2col) {
    DilatedIm2col3D(params, filter_depth, filter_height, filter_width,
                    float_zero_byte, input_shape, input_data, im2col_shape,
                    im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else if (need_im2col) {
    TFLITE_DCHECK(im2col_data);
    Im2col3D(params, filter_depth, filter_height, filter_width, float_zero_byte,
             input_shape, input_data, im2col_shape, im2col_data);
    gemm_input_data = im2col_data;
    gemm_input_shape = &im2col_shape;
  } else {
    // Direct path: no rearrangement needed, so no scratch buffer expected.
    TFLITE_DCHECK(!im2col_data);
    gemm_input_data = input_data;
    gemm_input_shape = &input_shape;
  }
  // Transpose the filter tensor: move the last filter dimension (the output
  // channels, matching output_shape.Dims(4) below) to the front so each GEMM
  // LHS row is one output channel's flattened kernel.
  TransposeParams transpose_params;
  transpose_params.perm_count = 5;
  transpose_params.perm[0] = 4;
  transpose_params.perm[1] = 0;
  transpose_params.perm[2] = 1;
  transpose_params.perm[3] = 2;
  transpose_params.perm[4] = 3;
  Transpose<float, 5>(transpose_params, filter_shape, filter_data,
                      transposed_filter_shape, transposed_filter_data);
  // GEMM dimensions: m = number of patches (output positions), n = output
  // channels, k = flattened patch length.
  const int gemm_input_dims = gemm_input_shape->DimensionsCount();
  int m = FlatSizeSkipDim(*gemm_input_shape, gemm_input_dims - 1);
  int n = output_shape.Dims(4);
  int k = gemm_input_shape->Dims(gemm_input_dims - 1);
  cpu_backend_gemm::MatrixParams<float> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = n;
  lhs_params.cols = k;
  cpu_backend_gemm::MatrixParams<float> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = k;
  rhs_params.cols = m;
  cpu_backend_gemm::MatrixParams<float> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = n;
  dst_params.cols = m;
  cpu_backend_gemm::GemmParams<float, float> gemm_params;
  // Bias addition and activation clamping are fused into the GEMM epilogue.
  gemm_params.bias = bias_data;
  gemm_params.clamp_min = output_activation_min;
  gemm_params.clamp_max = output_activation_max;
  cpu_backend_gemm::Gemm(lhs_params, transposed_filter_data, rhs_params,
                         gemm_input_data, dst_params, output_data, gemm_params,
                         cpu_backend_context);
}
} // namespace optimized_ops
} // namespace tflite
#if defined OPTIMIZED_OPS_H__IGNORE_DEPRECATED_DECLARATIONS
#undef OPTIMIZED_OPS_H__IGNORE_DEPRECATED_DECLARATIONS
#pragma GCC diagnostic pop
#endif
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_OPTIMIZED_OPS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/optimized_ops.h | C++ | apache-2.0 | 362,099 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_RESIZE_BILINEAR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_RESIZE_BILINEAR_H_
#include <stdint.h>
#include <sys/types.h>
#include <cmath>
#include <limits>
#include <memory>
#include <type_traits>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/tensor.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
namespace resize_bilinear {
#ifdef USE_NEON
// These utility functions are split off not just for convenience. Most
// incoporate packing or unpacking of data.
//
// (a) Optimizations can be tried experimentally.
// (b) Optimizations can be specialized for architectures, eg Intel vs ARM.
// Loads 8 uint8 values and zero-extends them into eight signed 16-bit lanes,
// so values remain in [0, 255].
inline int16x8_t Load8IntoLowerS16(const uint8* data_ptr) {
  return vreinterpretq_s16_u16(vmovl_u8(vld1_u8(data_ptr)));
}
// Widens 8 uint8 lanes to 16 bits and shifts each value into the upper byte
// of its lane (i.e. multiplies each value by 256).
inline uint16x8_t Move8IntoUpperU16(const uint8x8_t vec_val) {
  // Alternatively one could zip with a zero vector.
  return vshlq_n_u16(vmovl_u8(vec_val), 8);
}
// Loads 8 uint8 values and places each in the upper byte of a 16-bit lane.
inline uint16x8_t Load8IntoUpperU16(const uint8* data_ptr) {
  return Move8IntoUpperU16(vld1_u8(data_ptr));
}
// Extract upper 8 bits from each 16-bit integer in vector registers. This is
// performed for a pair, because instructions often work on pairs.
// vuzpq_u8 de-interleaves the bytes; val[1] gathers the odd-indexed bytes,
// which are the high bytes of each 16-bit lane on little-endian ARM.
inline void PairExtractUpper(const uint16x8_t accum_0, const uint16x8_t accum_1,
                             uint8x8_t* res_0, uint8x8_t* res_1) {
  uint8x16x2_t unzipped =
      vuzpq_u8(vreinterpretq_u8_u16(accum_0), vreinterpretq_u8_u16(accum_1));
  *res_0 = vget_low_u8(unzipped.val[1]);
  *res_1 = vget_high_u8(unzipped.val[1]);
}
// This is an exceptional definition.
//
// Modify int16x8_t, adding operators.
//
// There are exceptional circumstances that make it reasonable to write code
// on vector types for quantized resize bilinear in *some cases*.
//
// (a) In exact quant resize bilinear, it should be possible to guarantee that
// arithmetic never overflows.
// (b) When the resize scaling is 2 or 4 or 8 it is possible to guarantee
// exact accumulation and exact incrementation.
// (c) In quant resize bilinear the choice of unsigned vs signed accumulation
// and saturated vs unsaturated arithmetic is often unimportant.
//
// This pattern simplifies the code considerably. This pattern should not be
// used more widely in code since it can hide important numerical detail.
//
// DO NOT add to this any "class-like" methods: only those that do no more than
// redirecting operators to specific intrinsics functions.
// Wraps int16x8_t, adding arithmetic and shift operators so accumulation
// code reads naturally. See the block comment above for why this exception
// is made and why the pattern should not spread.
struct op_int16x8_t {
  inline op_int16x8_t() = default;
  inline explicit op_int16x8_t(const int16x8_t& initial_val) {
    val = initial_val;
  }
  inline op_int16x8_t& operator=(const int16x8_t& new_val) {
    val = new_val;
    return *this;
  }
  inline op_int16x8_t operator+=(const op_int16x8_t& add_val) {
    val = vaddq_s16(val, add_val.val);
    return *this;
  }
  inline op_int16x8_t operator-=(const op_int16x8_t& sub_val) {
    val = vsubq_s16(val, sub_val.val);
    return *this;
  }
  // This really selects vshlq_n_s16, but requires a longer implementation to
  // convert the shift argument back to a constant, because in some compilers
  // the intrinsics are macros requiring constant args. Only the shift
  // amounts used by this file (1, 4, 8) are supported.
  inline op_int16x8_t operator<<=(int32 left_shift) {
    switch (left_shift) {
      case 1:
        val = vshlq_n_s16(val, 1);
        break;
      case 4:
        val = vshlq_n_s16(val, 4);
        break;
      case 8:
        val = vshlq_n_s16(val, 8);
        break;
      default:
        TFLITE_CHECK(false);
        break;
    }
    return *this;
  }
  // This really selects vshrq_n_s16 (arithmetic shift on signed lanes), but
  // requires a longer implementation to convert the shift argument back to a
  // constant, because in some compilers the intrinsics are macros requiring
  // constant args. Only shift amounts 1, 4 and 8 are supported.
  inline op_int16x8_t operator>>=(int32 right_shift) {
    switch (right_shift) {
      case 1:
        val = vshrq_n_s16(val, 1);
        break;
      case 4:
        val = vshrq_n_s16(val, 4);
        break;
      case 8:
        val = vshrq_n_s16(val, 8);
        break;
      default:
        TFLITE_CHECK(false);
        break;
    }
    return *this;
  }
  friend inline op_int16x8_t operator+(op_int16x8_t lhs,
                                       const op_int16x8_t& rhs) {
    lhs += rhs;
    return lhs;
  }
  friend inline op_int16x8_t operator-(op_int16x8_t lhs,
                                       const op_int16x8_t& rhs) {
    lhs -= rhs;
    return lhs;
  }
  friend inline op_int16x8_t operator<<(op_int16x8_t lhs, int32 left_shift) {
    lhs <<= left_shift;
    return lhs;
  }
  friend inline op_int16x8_t operator>>(op_int16x8_t lhs, int32 right_shift) {
    lhs >>= right_shift;
    return lhs;
  }
  int16x8_t val;
};
// This is an exceptional definition.
//
// Modify uint16x8_t, adding operators.
//
// Important: See above notes on op_int16x8_t.
// Wraps uint16x8_t, adding arithmetic and shift operators (see the notes on
// op_int16x8_t above). Note that += and -= take *signed* op_int16x8_t
// operands, reinterpreted bitwise, so the arithmetic is two's-complement
// modular.
struct op_uint16x8_t {
  inline op_uint16x8_t() = default;
  inline explicit op_uint16x8_t(const uint16x8_t initial_val) {
    val = initial_val;
  }
  inline op_uint16x8_t& operator=(const uint16x8_t& new_val) {
    val = new_val;
    return *this;
  }
  inline op_uint16x8_t operator+=(const op_int16x8_t& add_val) {
    val = vaddq_u16(val, vreinterpretq_u16_s16(add_val.val));
    return *this;
  }
  inline op_uint16x8_t operator-=(const op_int16x8_t& sub_val) {
    val = vsubq_u16(val, vreinterpretq_u16_s16(sub_val.val));
    return *this;
  }
  // This really selects vshlq_n_u16, but requires a longer implementation to
  // convert the shift argument back to a constant, because in some compilers
  // the intrinsics are macros requiring constant args. Only the shift
  // amounts used by this file (1, 4, 8) are supported.
  inline op_uint16x8_t operator<<=(int32 left_shift) {
    switch (left_shift) {
      case 1:
        val = vshlq_n_u16(val, 1);
        break;
      case 4:
        val = vshlq_n_u16(val, 4);
        break;
      case 8:
        val = vshlq_n_u16(val, 8);
        break;
      default:
        TFLITE_CHECK(false);
        break;
    }
    return *this;
  }
  // This really selects vshrq_n_u16 (logical shift on unsigned lanes), but
  // requires a longer implementation to convert the shift argument back to a
  // constant, because in some compilers the intrinsics are macros requiring
  // constant args. Only shift amounts 1, 4 and 8 are supported.
  inline op_uint16x8_t operator>>=(int32 right_shift) {
    switch (right_shift) {
      case 1:
        val = vshrq_n_u16(val, 1);
        break;
      case 4:
        val = vshrq_n_u16(val, 4);
        break;
      case 8:
        val = vshrq_n_u16(val, 8);
        break;
      default:
        TFLITE_CHECK(false);
        break;
    }
    return *this;
  }
  friend inline op_uint16x8_t operator+(op_uint16x8_t lhs,
                                        const op_int16x8_t& rhs) {
    lhs += rhs;
    return lhs;
  }
  friend inline op_uint16x8_t operator-(op_uint16x8_t lhs,
                                        const op_int16x8_t& rhs) {
    lhs -= rhs;
    return lhs;
  }
  friend inline op_uint16x8_t operator<<(op_uint16x8_t lhs, int32 left_shift) {
    lhs <<= left_shift;
    return lhs;
  }
  friend inline op_uint16x8_t operator>>(op_uint16x8_t lhs, int32 right_shift) {
    lhs >>= right_shift;
    return lhs;
  }
  uint16x8_t val;
};
// Bit-reinterprets the signed 16x8 wrapper as the unsigned wrapper type,
// mirroring the underlying vreinterpretq_u16_s16 intrinsic (no value change).
inline op_uint16x8_t VReinterpretQU16S16(const op_int16x8_t& other) {
  return op_uint16x8_t(vreinterpretq_u16_s16(other.val));
}
#endif // USE_NEON
// Optimized resize-bilinear for the special case where the scaling is x8 in
// width and height, and where we can operate on depth-8 blocks at a time. So
// the output blocks are 8x8x8 in width-height-depth.
//
// This optimization is for the half_pixel_centers == true version, for uint8.
// There are versions for NEON and non-NEON compilation.
//
// Layout is NHWC; depth must be a multiple of 8 (checked below). The caller
// must supply an output buffer of (8 * input_height) x (8 * input_width) x
// depth per batch (this is implied by the stride arithmetic below -- confirm
// against callers).
//
// All interpolation runs in 8.8 fixed point. Accumulators pre-add 128
// (i.e. 0.5) so that extracting the upper byte rounds to nearest.
inline void ResizeBilinear888Uint8(int32 batches, int32 input_height,
                                   int32 input_width, int32 depth,
                                   const uint8* input_data,
                                   uint8* output_data) {
  TFLITE_DCHECK_GE(input_height, 1);
  TFLITE_DCHECK_GE(input_width, 1);
  TFLITE_DCHECK_EQ(depth % 8, 0);
  // Strides are in elements (== bytes for uint8); the output row is 8x wider.
  const int32 input_row_stride = input_width * depth;
  const int32 output_row_stride = input_row_stride * 8;
  for (int b = 0; b < batches; ++b) {
    const uint8* input_base_ptr =
        input_data + b * input_row_stride * input_height;
    uint8* output_base_ptr =
        output_data + b * output_row_stride * input_height * 8;
#ifdef USE_NEON
    for (int c_block = 0; c_block < depth; c_block += 8) {
      op_uint16x8_t accum_c_v;
      // Top-left margin corner.
      {
        uint8x8_t output_data = vld1_u8(&input_base_ptr[c_block]);
        vst1_u8(&output_base_ptr[c_block], output_data);
        vst1_u8(&output_base_ptr[c_block + depth], output_data);
        vst1_u8(&output_base_ptr[c_block + depth * 2], output_data);
        vst1_u8(&output_base_ptr[c_block + depth * 3], output_data);
        // Accumulate in 8.8 representation, pre-adding 0.5 for later rounding.
        accum_c_v = vaddq_u16(Move8IntoUpperU16(output_data), vdupq_n_u16(128));
      }
      // Top-centre margin.
      op_int16x8_t wdelta_c_v;
      op_int16x8_t wdelta_twice_c_v;
      for (int j = 0; j < (input_width - 1); ++j) {
        {
          uint8x8_t output_data_alt;
          uint8x8_t output_data;
          const op_int16x8_t tl_val(
              Load8IntoLowerS16(&input_base_ptr[c_block + depth * j]));
          const op_int16x8_t tr_val(
              Load8IntoLowerS16(&input_base_ptr[c_block + depth * (j + 1)]));
          // One output column advances the interpolant by diff/8, which is
          // diff << 5 in 8.8 representation; wdelta is half of that step, so
          // starting half a step in implements half-pixel centers.
          wdelta_c_v = (tr_val - tl_val) << 4;
          wdelta_twice_c_v = wdelta_c_v << 1;
          op_uint16x8_t accum_c_v_alt = accum_c_v + wdelta_c_v;
          accum_c_v = accum_c_v_alt + wdelta_twice_c_v;
          PairExtractUpper(accum_c_v_alt.val, accum_c_v.val, &output_data_alt,
                           &output_data);
          vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * 4],
                  output_data_alt);
          vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth + depth * 4],
                  output_data);
          for (int p = 2; p < 8; p += 2) {
            accum_c_v_alt = accum_c_v + wdelta_twice_c_v;
            accum_c_v = accum_c_v_alt + wdelta_twice_c_v;
            PairExtractUpper(accum_c_v_alt.val, accum_c_v.val, &output_data_alt,
                             &output_data);
            vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * p +
                                     depth * 4],
                    output_data_alt);
            vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * (p + 1) +
                                     depth * 4],
                    output_data);
          }
          accum_c_v += wdelta_c_v;
        }
      }
      // Top-right margin corner.
      {
        uint8x8_t output_data_discard;
        uint8x8_t output_data;
        // Accumulations have pre-added 0.5 for rounding, but that is just
        // discarded and this just avoids re-loading.
        PairExtractUpper(accum_c_v.val, accum_c_v.val, &output_data,
                         &output_data_discard);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth * 2],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth * 3],
                output_data);
      }
    }
    // Fill out remainder of top margin.
    std::memcpy(output_base_ptr + output_row_stride, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 2, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 3, output_base_ptr,
                output_row_stride * sizeof(uint8));
    output_base_ptr += output_row_stride * 4;
    // Main rows.
    for (int k = 0; k < (input_height - 1); ++k) {
      for (int c_block = 0; c_block < depth; c_block += 8) {
        uint8* output_base_ptr_0 = output_base_ptr;
        uint8* output_base_ptr_1;
        uint8* output_base_ptr_2;
        uint8* output_base_ptr_3;
        uint8* output_base_ptr_4;
        uint8* output_base_ptr_5;
        uint8* output_base_ptr_6;
        uint8* output_base_ptr_7;
        op_uint16x8_t accum_0_c_v;
        op_uint16x8_t accum_1_c_v;
        op_uint16x8_t accum_2_c_v;
        op_uint16x8_t accum_3_c_v;
        op_uint16x8_t accum_4_c_v;
        op_uint16x8_t accum_5_c_v;
        op_uint16x8_t accum_6_c_v;
        op_uint16x8_t accum_7_c_v;
        op_int16x8_t hdelta_c_v;
        op_int16x8_t hdelta_twice_c_v;
        // Left margin for 8 rows.
        {
          uint8x8_t output_data_0_c;
          uint8x8_t output_data_1_c;
          uint8x8_t output_data_2_c;
          uint8x8_t output_data_3_c;
          uint8x8_t output_data_4_c;
          uint8x8_t output_data_5_c;
          uint8x8_t output_data_6_c;
          uint8x8_t output_data_7_c;
          const op_int16x8_t tl_val(
              Load8IntoLowerS16(&input_base_ptr[c_block]));
          const op_int16x8_t bl_val(
              Load8IntoLowerS16(&input_base_ptr[c_block + input_row_stride]));
          // Vertical analogue of wdelta: half of one output-row step in 8.8.
          hdelta_c_v = (bl_val - tl_val) << 4;
          // Accumulate in 8.8 representation, pre-adding 0.5 for later
          // rounding.
          accum_0_c_v = VReinterpretQU16S16(tl_val << 8);
          accum_0_c_v = vaddq_u16(accum_0_c_v.val, vdupq_n_u16(128));
          hdelta_twice_c_v = hdelta_c_v << 1;
          accum_0_c_v += hdelta_c_v;
          accum_1_c_v = accum_0_c_v + hdelta_twice_c_v;
          PairExtractUpper(accum_0_c_v.val, accum_1_c_v.val, &output_data_0_c,
                           &output_data_1_c);
          vst1_u8(&output_base_ptr_0[c_block], output_data_0_c);
          vst1_u8(&output_base_ptr_0[c_block + depth], output_data_0_c);
          vst1_u8(&output_base_ptr_0[c_block + depth * 2], output_data_0_c);
          vst1_u8(&output_base_ptr_0[c_block + depth * 3], output_data_0_c);
          output_base_ptr_1 = output_base_ptr_0 + output_row_stride;
          vst1_u8(&output_base_ptr_1[c_block], output_data_1_c);
          vst1_u8(&output_base_ptr_1[c_block + depth], output_data_1_c);
          vst1_u8(&output_base_ptr_1[c_block + depth * 2], output_data_1_c);
          vst1_u8(&output_base_ptr_1[c_block + depth * 3], output_data_1_c);
          //
          output_base_ptr_2 = output_base_ptr_1 + output_row_stride;
          accum_2_c_v = accum_1_c_v + hdelta_twice_c_v;
          accum_3_c_v = accum_2_c_v + hdelta_twice_c_v;
          PairExtractUpper(accum_2_c_v.val, accum_3_c_v.val, &output_data_2_c,
                           &output_data_3_c);
          vst1_u8(&output_base_ptr_2[c_block], output_data_2_c);
          vst1_u8(&output_base_ptr_2[c_block + depth], output_data_2_c);
          vst1_u8(&output_base_ptr_2[c_block + depth * 2], output_data_2_c);
          vst1_u8(&output_base_ptr_2[c_block + depth * 3], output_data_2_c);
          output_base_ptr_3 = output_base_ptr_2 + output_row_stride;
          vst1_u8(&output_base_ptr_3[c_block], output_data_3_c);
          vst1_u8(&output_base_ptr_3[c_block + depth], output_data_3_c);
          vst1_u8(&output_base_ptr_3[c_block + depth * 2], output_data_3_c);
          vst1_u8(&output_base_ptr_3[c_block + depth * 3], output_data_3_c);
          //
          output_base_ptr_4 = output_base_ptr_3 + output_row_stride;
          accum_4_c_v = accum_3_c_v + hdelta_twice_c_v;
          accum_5_c_v = accum_4_c_v + hdelta_twice_c_v;
          PairExtractUpper(accum_4_c_v.val, accum_5_c_v.val, &output_data_4_c,
                           &output_data_5_c);
          vst1_u8(&output_base_ptr_4[c_block], output_data_4_c);
          vst1_u8(&output_base_ptr_4[c_block + depth], output_data_4_c);
          vst1_u8(&output_base_ptr_4[c_block + depth * 2], output_data_4_c);
          vst1_u8(&output_base_ptr_4[c_block + depth * 3], output_data_4_c);
          output_base_ptr_5 = output_base_ptr_4 + output_row_stride;
          vst1_u8(&output_base_ptr_5[c_block], output_data_5_c);
          vst1_u8(&output_base_ptr_5[c_block + depth], output_data_5_c);
          vst1_u8(&output_base_ptr_5[c_block + depth * 2], output_data_5_c);
          vst1_u8(&output_base_ptr_5[c_block + depth * 3], output_data_5_c);
          //
          output_base_ptr_6 = output_base_ptr_5 + output_row_stride;
          accum_6_c_v = accum_5_c_v + hdelta_twice_c_v;
          accum_7_c_v = accum_6_c_v + hdelta_twice_c_v;
          PairExtractUpper(accum_6_c_v.val, accum_7_c_v.val, &output_data_6_c,
                           &output_data_7_c);
          vst1_u8(&output_base_ptr_6[c_block], output_data_6_c);
          vst1_u8(&output_base_ptr_6[c_block + depth], output_data_6_c);
          vst1_u8(&output_base_ptr_6[c_block + depth * 2], output_data_6_c);
          vst1_u8(&output_base_ptr_6[c_block + depth * 3], output_data_6_c);
          output_base_ptr_7 = output_base_ptr_6 + output_row_stride;
          vst1_u8(&output_base_ptr_7[c_block], output_data_7_c);
          vst1_u8(&output_base_ptr_7[c_block + depth], output_data_7_c);
          vst1_u8(&output_base_ptr_7[c_block + depth * 2], output_data_7_c);
          vst1_u8(&output_base_ptr_7[c_block + depth * 3], output_data_7_c);
        }
        // Main central body.
        op_int16x8_t wdelta_c;
        op_int16x8_t wdelta_twice_c;
        op_int16x8_t hwdelta_c;
        op_int16x8_t hwdelta_twice_c;
        op_int16x8_t incr_0_c;
        op_int16x8_t incr_1_c;
        op_int16x8_t incr_2_c;
        op_int16x8_t incr_3_c;
        op_int16x8_t incr_4_c;
        op_int16x8_t incr_5_c;
        op_int16x8_t incr_6_c;
        op_int16x8_t incr_7_c;
        uint8x8_t output_data_0_c;
        uint8x8_t output_data_1_c;
        uint8x8_t output_data_2_c;
        uint8x8_t output_data_3_c;
        uint8x8_t output_data_4_c;
        uint8x8_t output_data_5_c;
        uint8x8_t output_data_6_c;
        uint8x8_t output_data_7_c;
        for (int j = 0; j < (input_width - 1); ++j) {
          // output_base_ptr_0 = output_base_ptr;
          // output_base_ptr_1 = output_base_ptr_0 + output_row_stride; ETC
          {
            const op_int16x8_t tl_val(
                Load8IntoLowerS16(&input_base_ptr[c_block + depth * j]));
            const op_int16x8_t bl_val(Load8IntoLowerS16(
                &input_base_ptr[c_block + depth * j + input_row_stride]));
            const op_int16x8_t tr_val(
                Load8IntoLowerS16(&input_base_ptr[c_block + depth * (j + 1)]));
            const op_int16x8_t br_val(Load8IntoLowerS16(
                &input_base_ptr[c_block + depth * (j + 1) + input_row_stride]));
            const op_int16x8_t tmp_diff = tr_val - tl_val;
            wdelta_c = tmp_diff << 4;
            wdelta_twice_c = wdelta_c << 1;
            // Cross (second-difference) term. Deliberately NOT shifted by 4:
            // row k applies it with weight (2k+1), and (2k+1)/16 of the
            // <<4-scaled value equals (2k+1) times the raw value.
            hwdelta_c = (br_val - bl_val) - tmp_diff;
            hwdelta_twice_c = hwdelta_c << 1;
            op_int16x8_t incr_base = wdelta_c + hwdelta_c;
            accum_0_c_v += incr_base;
            incr_0_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_1_c_v += incr_base;
            incr_1_c = incr_base << 1;
            PairExtractUpper(accum_0_c_v.val, accum_1_c_v.val, &output_data_0_c,
                             &output_data_1_c);
            vst1_u8(&output_base_ptr_0[c_block + depth * j * 8 + depth * 4],
                    output_data_0_c);
            vst1_u8(&output_base_ptr_1[c_block + depth * j * 8 + depth * 4],
                    output_data_1_c);
            incr_base += hwdelta_twice_c;
            accum_2_c_v += incr_base;
            incr_2_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_3_c_v += incr_base;
            incr_3_c = incr_base << 1;
            PairExtractUpper(accum_2_c_v.val, accum_3_c_v.val, &output_data_2_c,
                             &output_data_3_c);
            vst1_u8(&output_base_ptr_2[c_block + depth * j * 8 + depth * 4],
                    output_data_2_c);
            vst1_u8(&output_base_ptr_3[c_block + depth * j * 8 + depth * 4],
                    output_data_3_c);
            incr_base += hwdelta_twice_c;
            accum_4_c_v += incr_base;
            incr_4_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_5_c_v += incr_base;
            incr_5_c = incr_base << 1;
            PairExtractUpper(accum_4_c_v.val, accum_5_c_v.val, &output_data_4_c,
                             &output_data_5_c);
            vst1_u8(&output_base_ptr_4[c_block + depth * j * 8 + depth * 4],
                    output_data_4_c);
            vst1_u8(&output_base_ptr_5[c_block + depth * j * 8 + depth * 4],
                    output_data_5_c);
            incr_base += hwdelta_twice_c;
            accum_6_c_v += incr_base;
            incr_6_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_7_c_v += incr_base;
            incr_7_c = incr_base << 1;
            PairExtractUpper(accum_6_c_v.val, accum_7_c_v.val, &output_data_6_c,
                             &output_data_7_c);
            vst1_u8(&output_base_ptr_6[c_block + depth * j * 8 + depth * 4],
                    output_data_6_c);
            vst1_u8(&output_base_ptr_7[c_block + depth * j * 8 + depth * 4],
                    output_data_7_c);
            for (int p = 1; p < 8; ++p) {
              accum_0_c_v += incr_0_c;
              accum_1_c_v += incr_1_c;
              PairExtractUpper(accum_0_c_v.val, accum_1_c_v.val,
                               &output_data_0_c, &output_data_1_c);
              vst1_u8(&output_base_ptr_0[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_0_c);
              vst1_u8(&output_base_ptr_1[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_1_c);
              accum_2_c_v += incr_2_c;
              accum_3_c_v += incr_3_c;
              PairExtractUpper(accum_2_c_v.val, accum_3_c_v.val,
                               &output_data_2_c, &output_data_3_c);
              vst1_u8(&output_base_ptr_2[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_2_c);
              vst1_u8(&output_base_ptr_3[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_3_c);
              accum_4_c_v += incr_4_c;
              accum_5_c_v += incr_5_c;
              PairExtractUpper(accum_4_c_v.val, accum_5_c_v.val,
                               &output_data_4_c, &output_data_5_c);
              vst1_u8(&output_base_ptr_4[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_4_c);
              vst1_u8(&output_base_ptr_5[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_5_c);
              accum_6_c_v += incr_6_c;
              accum_7_c_v += incr_7_c;
              PairExtractUpper(accum_6_c_v.val, accum_7_c_v.val,
                               &output_data_6_c, &output_data_7_c);
              vst1_u8(&output_base_ptr_6[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_6_c);
              vst1_u8(&output_base_ptr_7[c_block + depth * j * 8 + depth * p +
                                         depth * 4],
                      output_data_7_c);
            }
            // Advance by a half step so the next input cell resumes at its
            // half-pixel offset.
            accum_0_c_v += (incr_0_c >> 1);
            accum_1_c_v += (incr_1_c >> 1);
            accum_2_c_v += (incr_2_c >> 1);
            accum_3_c_v += (incr_3_c >> 1);
            accum_4_c_v += (incr_4_c >> 1);
            accum_5_c_v += (incr_5_c >> 1);
            accum_6_c_v += (incr_6_c >> 1);
            accum_7_c_v += (incr_7_c >> 1);
          }
        }
        // Right margin.
        {
          // Accumulations have pre-added 0.5 for rounding, but that is just
          // discarded and this just avoids re-loading.
          PairExtractUpper(accum_0_c_v.val, accum_1_c_v.val, &output_data_0_c,
                           &output_data_1_c);
          PairExtractUpper(accum_2_c_v.val, accum_3_c_v.val, &output_data_2_c,
                           &output_data_3_c);
          PairExtractUpper(accum_4_c_v.val, accum_5_c_v.val, &output_data_4_c,
                           &output_data_5_c);
          PairExtractUpper(accum_6_c_v.val, accum_7_c_v.val, &output_data_6_c,
                           &output_data_7_c);
          for (int p = 0; p < 4; ++p) {
            vst1_u8(&output_base_ptr_0[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_0_c);
            vst1_u8(&output_base_ptr_1[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_1_c);
            vst1_u8(&output_base_ptr_2[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_2_c);
            vst1_u8(&output_base_ptr_3[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_3_c);
            vst1_u8(&output_base_ptr_4[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_4_c);
            vst1_u8(&output_base_ptr_5[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_5_c);
            vst1_u8(&output_base_ptr_6[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_6_c);
            vst1_u8(&output_base_ptr_7[c_block + depth * (input_width - 1) * 8 +
                                       depth * 4 + depth * p],
                    output_data_7_c);
          }
        }
      }
      output_base_ptr += output_row_stride * 8;
      input_base_ptr += input_row_stride;
    }
    //
    for (int c_block = 0; c_block < depth; c_block += 8) {
      op_uint16x8_t accum_c_v;
      // Bottom-left margin corner.
      {
        uint8x8_t output_data = vld1_u8(&input_base_ptr[c_block]);
        vst1_u8(&output_base_ptr[c_block], output_data);
        vst1_u8(&output_base_ptr[c_block + depth], output_data);
        vst1_u8(&output_base_ptr[c_block + depth * 2], output_data);
        vst1_u8(&output_base_ptr[c_block + depth * 3], output_data);
        // Accumulate in 8.8 representation, pre-adding 0.5 for later rounding.
        accum_c_v = vaddq_u16(Move8IntoUpperU16(output_data), vdupq_n_u16(128));
      }
      // Bottom-centre margin.
      op_int16x8_t wdelta_c_v;
      op_int16x8_t wdelta_twice_c_v;
      for (int j = 0; j < (input_width - 1); ++j) {
        {
          uint8x8_t output_data_alt;
          uint8x8_t output_data;
          const op_int16x8_t tl_val(
              Load8IntoLowerS16(&input_base_ptr[c_block + depth * j]));
          const op_int16x8_t tr_val(
              Load8IntoLowerS16(&input_base_ptr[c_block + depth * (j + 1)]));
          wdelta_c_v = (tr_val - tl_val) << 4;
          wdelta_twice_c_v = wdelta_c_v << 1;
          op_uint16x8_t accum_c_v_alt = accum_c_v + wdelta_c_v;
          accum_c_v = accum_c_v_alt + wdelta_twice_c_v;
          PairExtractUpper(accum_c_v_alt.val, accum_c_v.val, &output_data_alt,
                           &output_data);
          vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * 4],
                  output_data_alt);
          vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth + depth * 4],
                  output_data);
          for (int p = 2; p < 8; p += 2) {
            accum_c_v_alt = accum_c_v + wdelta_twice_c_v;
            accum_c_v = accum_c_v_alt + wdelta_twice_c_v;
            PairExtractUpper(accum_c_v_alt.val, accum_c_v.val, &output_data_alt,
                             &output_data);
            vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * p +
                                     depth * 4],
                    output_data_alt);
            vst1_u8(&output_base_ptr[c_block + depth * j * 8 + depth * (p + 1) +
                                     depth * 4],
                    output_data);
          }
          accum_c_v += wdelta_c_v;
        }
      }
      // Bottom-right margin corner.
      {
        uint8x8_t output_data_discard;
        uint8x8_t output_data;
        // Accumulations have pre-added 0.5 for rounding, but that is just
        // discarded and this just avoids re-loading.
        PairExtractUpper(accum_c_v.val, accum_c_v.val, &output_data,
                         &output_data_discard);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth * 2],
                output_data);
        vst1_u8(&output_base_ptr[c_block + depth * (input_width - 1) * 8 +
                                 depth * 4 + depth * 3],
                output_data);
      }
    }
    // Fill out remainder of bottom margin.
    std::memcpy(output_base_ptr + output_row_stride, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 2, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 3, output_base_ptr,
                output_row_stride * sizeof(uint8));
#else  // USE_NEON
    for (int c_block = 0; c_block < depth; c_block += 8) {
      uint8 output_data[8];
      uint16 accum[8];
      // Top-left margin corner.
      for (int c = 0; c < 8; ++c) {
        output_data[c] = input_base_ptr[c_block + c];
        output_base_ptr[c_block + c] = output_data[c];
        output_base_ptr[c_block + c + depth] = output_data[c];
        output_base_ptr[c_block + c + depth * 2] = output_data[c];
        output_base_ptr[c_block + c + depth * 3] = output_data[c];
        // Accumulate in 8.8 representation, pre-adding 0.5 for later rounding.
        accum[c] =
            (output_data[c] << 8) + 128;  // 128 = 0.5 in 8.8 representation.
      }
      // Top-centre margin.
      uint16 wdelta[8];
      uint16 wdelta_twice[8];
      for (int j = 0; j < (input_width - 1); ++j) {
        for (int c = 0; c < 8; ++c) {
          // Half of one output-column step in 8.8 (see NEON path above).
          wdelta[c] = static_cast<uint16>(
                          input_base_ptr[c_block + c + depth * (j + 1)] -
                          input_base_ptr[c_block + c + depth * j])
                      << 4;
          wdelta_twice[c] = wdelta[c] << 1;
          accum[c] += wdelta[c];
          output_base_ptr[c_block + c + depth * j * 8 + depth * 4] =
              accum[c] >> 8;
          for (int p = 1; p < 8; ++p) {
            accum[c] += wdelta_twice[c];
            output_base_ptr[c_block + c + depth * j * 8 + depth * p +
                            depth * 4] = accum[c] >> 8;
          }
          accum[c] += wdelta[c];
        }
      }
      // Top-right margin corner.
      for (int c = 0; c < 8; ++c) {
        // Accumulations have pre-added 0.5 for rounding, but that is just
        // discarded and this just avoids re-loading.
        output_data[c] = accum[c] >> 8;
        TFLITE_DCHECK_EQ(
            output_data[c],
            input_base_ptr[c_block + c + depth * (input_width - 1)]);
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth * 2] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth * 3] = output_data[c];
      }
    }
    // Fill out remainder of top margin.
    std::memcpy(output_base_ptr + output_row_stride, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 2, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 3, output_base_ptr,
                output_row_stride * sizeof(uint8));
    output_base_ptr += output_row_stride * 4;
    // Main rows.
    for (int k = 0; k < (input_height - 1); ++k) {
      for (int c_block = 0; c_block < depth; c_block += 8) {
        uint8* output_base_ptr_0 = output_base_ptr;
        uint8* output_base_ptr_1;
        uint8* output_base_ptr_2;
        uint8* output_base_ptr_3;
        uint8* output_base_ptr_4;
        uint8* output_base_ptr_5;
        uint8* output_base_ptr_6;
        uint8* output_base_ptr_7;
        uint16 accum_0[8];
        uint16 accum_1[8];
        uint16 accum_2[8];
        uint16 accum_3[8];
        uint16 accum_4[8];
        uint16 accum_5[8];
        uint16 accum_6[8];
        uint16 accum_7[8];
        // We prefer accum_0[c], etc, in sense of packed-data array for
        // register. However the compiler will not reliably optimize for an
        // array, and so we do most of the work in pure scalar variables.
        uint16 accum_0_c;
        uint16 accum_1_c;
        uint16 accum_2_c;
        uint16 accum_3_c;
        uint16 accum_4_c;
        uint16 accum_5_c;
        uint16 accum_6_c;
        uint16 accum_7_c;
        int16 hdelta_c;
        int16 hdelta_twice_c;
        // Left margin for 8 rows.
        for (int c = 0; c < 8; ++c) {
          hdelta_c = static_cast<uint16>(
                         input_base_ptr[c_block + c + input_row_stride] -
                         input_base_ptr[c_block + c])
                     << 4;
          // Accumulate in 8.8 representation, pre-adding 0.5 for later
          // rounding.
          accum_0_c = (input_base_ptr[c_block + c] << 8) + 128;
          accum_0_c += hdelta_c;
          output_base_ptr_0[c_block + c] = accum_0_c >> 8;
          output_base_ptr_0[c_block + c + depth] = accum_0_c >> 8;
          output_base_ptr_0[c_block + c + depth * 2] = accum_0_c >> 8;
          output_base_ptr_0[c_block + c + depth * 3] = accum_0_c >> 8;
          hdelta_twice_c = hdelta_c << 1;
          output_base_ptr_1 = output_base_ptr_0 + output_row_stride;
          accum_1_c = accum_0_c + hdelta_twice_c;
          output_base_ptr_1[c_block + c] = accum_1_c >> 8;
          output_base_ptr_1[c_block + c + depth] = accum_1_c >> 8;
          output_base_ptr_1[c_block + c + depth * 2] = accum_1_c >> 8;
          output_base_ptr_1[c_block + c + depth * 3] = accum_1_c >> 8;
          output_base_ptr_2 = output_base_ptr_1 + output_row_stride;
          accum_2_c = accum_1_c + hdelta_twice_c;
          output_base_ptr_2[c_block + c] = accum_2_c >> 8;
          output_base_ptr_2[c_block + c + depth] = accum_2_c >> 8;
          output_base_ptr_2[c_block + c + depth * 2] = accum_2_c >> 8;
          output_base_ptr_2[c_block + c + depth * 3] = accum_2_c >> 8;
          output_base_ptr_3 = output_base_ptr_2 + output_row_stride;
          accum_3_c = accum_2_c + hdelta_twice_c;
          output_base_ptr_3[c_block + c] = accum_3_c >> 8;
          output_base_ptr_3[c_block + c + depth] = accum_3_c >> 8;
          output_base_ptr_3[c_block + c + depth * 2] = accum_3_c >> 8;
          output_base_ptr_3[c_block + c + depth * 3] = accum_3_c >> 8;
          output_base_ptr_4 = output_base_ptr_3 + output_row_stride;
          accum_4_c = accum_3_c + hdelta_twice_c;
          output_base_ptr_4[c_block + c] = accum_4_c >> 8;
          output_base_ptr_4[c_block + c + depth] = accum_4_c >> 8;
          output_base_ptr_4[c_block + c + depth * 2] = accum_4_c >> 8;
          output_base_ptr_4[c_block + c + depth * 3] = accum_4_c >> 8;
          output_base_ptr_5 = output_base_ptr_4 + output_row_stride;
          accum_5_c = accum_4_c + hdelta_twice_c;
          output_base_ptr_5[c_block + c] = accum_5_c >> 8;
          output_base_ptr_5[c_block + c + depth] = accum_5_c >> 8;
          output_base_ptr_5[c_block + c + depth * 2] = accum_5_c >> 8;
          output_base_ptr_5[c_block + c + depth * 3] = accum_5_c >> 8;
          output_base_ptr_6 = output_base_ptr_5 + output_row_stride;
          accum_6_c = accum_5_c + hdelta_twice_c;
          output_base_ptr_6[c_block + c] = accum_6_c >> 8;
          output_base_ptr_6[c_block + c + depth] = accum_6_c >> 8;
          output_base_ptr_6[c_block + c + depth * 2] = accum_6_c >> 8;
          output_base_ptr_6[c_block + c + depth * 3] = accum_6_c >> 8;
          output_base_ptr_7 = output_base_ptr_6 + output_row_stride;
          accum_7_c = accum_6_c + hdelta_twice_c;
          output_base_ptr_7[c_block + c] = accum_7_c >> 8;
          output_base_ptr_7[c_block + c + depth] = accum_7_c >> 8;
          output_base_ptr_7[c_block + c + depth * 2] = accum_7_c >> 8;
          output_base_ptr_7[c_block + c + depth * 3] = accum_7_c >> 8;
          accum_0[c] = accum_0_c;
          accum_1[c] = accum_1_c;
          accum_2[c] = accum_2_c;
          accum_3[c] = accum_3_c;
          accum_4[c] = accum_4_c;
          accum_5[c] = accum_5_c;
          accum_6[c] = accum_6_c;
          accum_7[c] = accum_7_c;
        }
        // Main central body.
        int16 wdelta_c;
        int16 wdelta_twice_c;
        int16 hwdelta_c;
        int16 hwdelta_twice_c;
        int16 incr_0_c;
        int16 incr_1_c;
        int16 incr_2_c;
        int16 incr_3_c;
        int16 incr_4_c;
        int16 incr_5_c;
        int16 incr_6_c;
        int16 incr_7_c;
        for (int j = 0; j < (input_width - 1); ++j) {
          for (int c = 0; c < 8; ++c) {
            accum_0_c = accum_0[c];
            accum_1_c = accum_1[c];
            accum_2_c = accum_2[c];
            accum_3_c = accum_3[c];
            accum_4_c = accum_4[c];
            accum_5_c = accum_5[c];
            accum_6_c = accum_6[c];
            accum_7_c = accum_7[c];
            wdelta_c = static_cast<uint16>(
                           input_base_ptr[c_block + c + depth * (j + 1)] -
                           input_base_ptr[c_block + c + depth * j])
                       << 4;
            wdelta_twice_c = wdelta_c << 1;
            // Cross (second-difference) term; unshifted for the same scaling
            // reason as in the NEON path.
            hwdelta_c = static_cast<uint16>(
                input_base_ptr[c_block + c + depth * (j + 1) +
                               input_row_stride] -
                input_base_ptr[c_block + c + depth * (j + 1)] -
                input_base_ptr[c_block + c + depth * j + input_row_stride] +
                input_base_ptr[c_block + c + depth * j]);
            hwdelta_twice_c = hwdelta_c << 1;
            uint16 incr_base = wdelta_c + hwdelta_c;
            accum_0_c += incr_base;
            output_base_ptr_0[c_block + c + depth * j * 8 + depth * 4] =
                accum_0_c >> 8;
            incr_0_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_1_c += incr_base;
            output_base_ptr_1[c_block + c + depth * j * 8 + depth * 4] =
                accum_1_c >> 8;
            incr_1_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_2_c += incr_base;
            output_base_ptr_2[c_block + c + depth * j * 8 + depth * 4] =
                accum_2_c >> 8;
            incr_2_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_3_c += incr_base;
            output_base_ptr_3[c_block + c + depth * j * 8 + depth * 4] =
                accum_3_c >> 8;
            incr_3_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_4_c += incr_base;
            output_base_ptr_4[c_block + c + depth * j * 8 + depth * 4] =
                accum_4_c >> 8;
            incr_4_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_5_c += incr_base;
            output_base_ptr_5[c_block + c + depth * j * 8 + depth * 4] =
                accum_5_c >> 8;
            incr_5_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_6_c += incr_base;
            output_base_ptr_6[c_block + c + depth * j * 8 + depth * 4] =
                accum_6_c >> 8;
            incr_6_c = incr_base << 1;
            incr_base += hwdelta_twice_c;
            accum_7_c += incr_base;
            output_base_ptr_7[c_block + c + depth * j * 8 + depth * 4] =
                accum_7_c >> 8;
            incr_7_c = incr_base << 1;
            for (int p = 1; p < 8; ++p) {
              accum_0_c += incr_0_c;
              output_base_ptr_0[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_0_c >> 8;
              accum_1_c += incr_1_c;
              output_base_ptr_1[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_1_c >> 8;
              accum_2_c += incr_2_c;
              output_base_ptr_2[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_2_c >> 8;
              accum_3_c += incr_3_c;
              output_base_ptr_3[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_3_c >> 8;
              accum_4_c += incr_4_c;
              output_base_ptr_4[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_4_c >> 8;
              accum_5_c += incr_5_c;
              output_base_ptr_5[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_5_c >> 8;
              accum_6_c += incr_6_c;
              output_base_ptr_6[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_6_c >> 8;
              accum_7_c += incr_7_c;
              output_base_ptr_7[c_block + c + depth * j * 8 + depth * p +
                                depth * 4] = accum_7_c >> 8;
            }
            // Half-step advance to the next cell's starting offset.
            // NOTE(review): `/ 2` truncates toward zero while the NEON path's
            // `>> 1` floors; for negative incr these differ by one 1/256 ULP
            // -- confirm the two paths are intended to match exactly.
            accum_0_c += incr_0_c / 2;
            accum_1_c += incr_1_c / 2;
            accum_2_c += incr_2_c / 2;
            accum_3_c += incr_3_c / 2;
            accum_4_c += incr_4_c / 2;
            accum_5_c += incr_5_c / 2;
            accum_6_c += incr_6_c / 2;
            accum_7_c += incr_7_c / 2;
            accum_0[c] = accum_0_c;
            accum_1[c] = accum_1_c;
            accum_2[c] = accum_2_c;
            accum_3[c] = accum_3_c;
            accum_4[c] = accum_4_c;
            accum_5[c] = accum_5_c;
            accum_6[c] = accum_6_c;
            accum_7[c] = accum_7_c;
          }
        }
        // Right margin.
        uint8 output_data_0_c;
        uint8 output_data_1_c;
        uint8 output_data_2_c;
        uint8 output_data_3_c;
        uint8 output_data_4_c;
        uint8 output_data_5_c;
        uint8 output_data_6_c;
        uint8 output_data_7_c;
        for (int c = 0; c < 8; ++c) {
          accum_0_c = accum_0[c];
          accum_1_c = accum_1[c];
          accum_2_c = accum_2[c];
          accum_3_c = accum_3[c];
          accum_4_c = accum_4[c];
          accum_5_c = accum_5[c];
          accum_6_c = accum_6[c];
          accum_7_c = accum_7[c];
          // Accumulations have pre-added 0.5 for rounding, but that is just
          // discarded and this just avoids re-loading.
          output_data_0_c = accum_0_c >> 8;
          output_data_1_c = accum_1_c >> 8;
          output_data_2_c = accum_2_c >> 8;
          output_data_3_c = accum_3_c >> 8;
          output_data_4_c = accum_4_c >> 8;
          output_data_5_c = accum_5_c >> 8;
          output_data_6_c = accum_6_c >> 8;
          output_data_7_c = accum_7_c >> 8;
          for (int p = 0; p < 4; ++p) {
            output_base_ptr_0[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_0_c;
            output_base_ptr_1[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_1_c;
            output_base_ptr_2[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_2_c;
            output_base_ptr_3[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_3_c;
            output_base_ptr_4[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_4_c;
            output_base_ptr_5[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_5_c;
            output_base_ptr_6[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_6_c;
            output_base_ptr_7[c_block + c + depth * (input_width - 1) * 8 +
                              depth * 4 + depth * p] = output_data_7_c;
          }
          accum_0[c] = accum_0_c;
          accum_1[c] = accum_1_c;
          accum_2[c] = accum_2_c;
          accum_3[c] = accum_3_c;
          accum_4[c] = accum_4_c;
          accum_5[c] = accum_5_c;
          accum_6[c] = accum_6_c;
          accum_7[c] = accum_7_c;
        }
      }
      output_base_ptr += output_row_stride * 8;
      input_base_ptr += input_row_stride;
    }
    for (int c_block = 0; c_block < depth; c_block += 8) {
      uint8 output_data[8];
      uint16 accum[8];
      // Bottom-left margin corner.
      for (int c = 0; c < 8; ++c) {
        output_data[c] = input_base_ptr[c_block + c];
        output_base_ptr[c_block + c] = output_data[c];
        output_base_ptr[c_block + c + depth] = output_data[c];
        output_base_ptr[c_block + c + depth * 2] = output_data[c];
        output_base_ptr[c_block + c + depth * 3] = output_data[c];
        // Accumulate in 8.8 representation, pre-adding 0.5 for later rounding.
        accum[c] =
            (output_data[c] << 8) + 128;  // 128 = 0.5 in 8.8 representation.
      }
      // Bottom-centre margin.
      uint16 wdelta[8];
      uint16 wdelta_twice[8];
      for (int j = 0; j < (input_width - 1); ++j) {
        for (int c = 0; c < 8; ++c) {
          wdelta[c] = static_cast<uint16>(
                          input_base_ptr[c_block + c + depth * (j + 1)] -
                          input_base_ptr[c_block + c + depth * j])
                      << 4;
          wdelta_twice[c] = wdelta[c] << 1;
          accum[c] += wdelta[c];
          output_base_ptr[c_block + c + depth * j * 8 + depth * 4] =
              accum[c] >> 8;
          for (int p = 1; p < 8; ++p) {
            accum[c] += wdelta_twice[c];
            output_base_ptr[c_block + c + depth * j * 8 + depth * p +
                            depth * 4] = accum[c] >> 8;
          }
          accum[c] += wdelta[c];
        }
      }
      // Bottom-right margin corner.
      for (int c = 0; c < 8; ++c) {
        // Accumulations have pre-added 0.5 for rounding, but that is just
        // discarded and this just avoids re-loading.
        output_data[c] = accum[c] >> 8;
        TFLITE_DCHECK_EQ(
            output_data[c],
            input_base_ptr[c_block + c + depth * (input_width - 1)]);
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth * 2] = output_data[c];
        output_base_ptr[c_block + c + depth * (input_width - 1) * 8 +
                        depth * 4 + depth * 3] = output_data[c];
      }
    }
    // Fill out remainder of bottom margin.
    std::memcpy(output_base_ptr + output_row_stride, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 2, output_base_ptr,
                output_row_stride * sizeof(uint8));
    std::memcpy(output_base_ptr + output_row_stride * 3, output_base_ptr,
                output_row_stride * sizeof(uint8));
#endif  // USE_NEON
  }
}  // NOLINT(readability/fn_size)
} // namespace resize_bilinear
#ifdef USE_NEON
// Accumulates one bilinear tap: output[c] += input[c] * scale for every
// channel c in [0, depth). NEON version; channels are processed in blocks of
// 32, 16, 8 and 4 lanes, with a scalar loop for the remainder.
// NOTE(review): this kernel only adds into `output_ptr`; the caller is
// responsible for zero-initializing the output before the first tap.
inline void ResizeBilinearKernel(const float* input_ptr, int32 depth,
                                 float scale, float* output_ptr) {
  int ic = 0;
  // Handle 32 input channels at a time.
  for (; ic <= depth - 32; ic += 32) {
    // Load 32 input floats as four pairs of 4-lane registers.
    float32x4x2_t input[4];
    for (int i = 0; i < 4; i++) {
      input[i].val[0] = vld1q_f32(input_ptr + 8 * i);
      input[i].val[1] = vld1q_f32(input_ptr + 8 * i + 4);
    }
    // Load the current accumulator values from the output buffer.
    float32x4x2_t acc[4];
    for (int i = 0; i < 4; i++) {
      acc[i].val[0] = vld1q_f32(output_ptr + 8 * i);
      acc[i].val[1] = vld1q_f32(output_ptr + 8 * i + 4);
    }
    // Fused multiply-accumulate: acc += input * scale.
    for (int i = 0; i < 4; i++) {
      acc[i].val[0] = vmlaq_n_f32(acc[i].val[0], input[i].val[0], scale);
      acc[i].val[1] = vmlaq_n_f32(acc[i].val[1], input[i].val[1], scale);
    }
    // Store the updated accumulators, advancing the output pointer.
    for (int i = 0; i < 4; i++) {
      vst1q_f32(output_ptr, acc[i].val[0]);
      vst1q_f32(output_ptr + 4, acc[i].val[1]);
      output_ptr += 8;
    }
    input_ptr += 32;
  }
  // Handle 16 input channels at a time.
  for (; ic <= depth - 16; ic += 16) {
    float32x4x2_t input[2];
    for (int i = 0; i < 2; i++) {
      input[i].val[0] = vld1q_f32(input_ptr + 8 * i);
      input[i].val[1] = vld1q_f32(input_ptr + 8 * i + 4);
    }
    float32x4x2_t acc[2];
    for (int i = 0; i < 2; i++) {
      acc[i].val[0] = vld1q_f32(output_ptr + 8 * i);
      acc[i].val[1] = vld1q_f32(output_ptr + 8 * i + 4);
    }
    for (int i = 0; i < 2; i++) {
      acc[i].val[0] = vmlaq_n_f32(acc[i].val[0], input[i].val[0], scale);
      acc[i].val[1] = vmlaq_n_f32(acc[i].val[1], input[i].val[1], scale);
    }
    for (int i = 0; i < 2; i++) {
      vst1q_f32(output_ptr, acc[i].val[0]);
      vst1q_f32(output_ptr + 4, acc[i].val[1]);
      output_ptr += 8;
    }
    input_ptr += 16;
  }
  // Handle 8 input channels at a time.
  for (; ic <= depth - 8; ic += 8) {
    float32x4x2_t input;
    input.val[0] = vld1q_f32(input_ptr);
    input.val[1] = vld1q_f32(input_ptr + 4);
    float32x4x2_t acc;
    acc.val[0] = vld1q_f32(output_ptr);
    acc.val[1] = vld1q_f32(output_ptr + 4);
    acc.val[0] = vmlaq_n_f32(acc.val[0], input.val[0], scale);
    acc.val[1] = vmlaq_n_f32(acc.val[1], input.val[1], scale);
    vst1q_f32(output_ptr, acc.val[0]);
    vst1q_f32(output_ptr + 4, acc.val[1]);
    input_ptr += 8;
    output_ptr += 8;
  }
  // Handle 4 input channels at a time.
  for (; ic <= depth - 4; ic += 4) {
    float32x4_t input = vld1q_f32(input_ptr);
    float32x4_t acc = vld1q_f32(output_ptr);
    acc = vmlaq_n_f32(acc, input, scale);
    vst1q_f32(output_ptr, acc);
    input_ptr += 4;
    output_ptr += 4;
  }
  // Handle 1 input channel at a time (scalar tail).
  for (; ic < depth; ic++) {
    *output_ptr += *input_ptr * scale;
    output_ptr++;
    input_ptr++;
  }
}
#else
inline void ResizeBilinearKernel(const float* input_ptr, int32 depth,
float scale, float* output_ptr) {
for (int32 i = 0; i < depth; i++) {
*output_ptr += *input_ptr * scale;
output_ptr++;
input_ptr++;
}
}
#endif
// Writes one 2x2 block of output pixels at (y, x) for an exact 2x upsample.
// (x0, y0) is the source pixel and (x1, y1) its right/bottom neighbor, already
// clamped to the image border by the caller. The four outputs are:
//   top-left     = in(x0, y0)
//   top-right    = (in(x0, y0) + in(x1, y0)) / 2
//   bottom-left  = (in(x0, y0) + in(x0, y1)) / 2
//   bottom-right = average of all four input corners
inline void ResizeBilinearKernel2x2(int32 x0, int32 x1, int32 y0, int32 y1,
                                    int32 x, int32 y, int32 depth, int32 batch,
                                    const RuntimeShape& input_shape,
                                    const float* input_data,
                                    const RuntimeShape& output_shape,
                                    float* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int32 input_width = input_shape.Dims(2);
  const int32 output_width = output_shape.Dims(2);
  // Element strides between the sampled corners; zero at a clamped border
  // (x1 == x0 or y1 == y0), so border pixels are effectively duplicated.
  const int32 input_x_offset = (x1 - x0) * depth;
  const int32 input_y_offset = (y1 - y0) * depth * input_width;
  const int32 output_x_offset = depth;
  const int32 output_y_offset = depth * output_width;
#ifdef USE_NEON
  TFLITE_DCHECK(x1 >= x0);
  TFLITE_DCHECK(y1 >= y0);
  int ic = 0;
  // Handle 8 input channels at a time.
  for (; ic <= depth - 8; ic += 8) {
    // Gather the four corner vectors by walking the input with the strides
    // computed above.
    const float* input_ptr = nullptr;
    float32x4x2_t x0y0;
    input_ptr = &input_data[Offset(input_shape, batch, y0, x0, ic)];
    x0y0.val[0] = vld1q_f32(input_ptr);
    x0y0.val[1] = vld1q_f32(input_ptr + 4);
    float32x4x2_t x1y0;
    input_ptr += input_x_offset;
    x1y0.val[0] = vld1q_f32(input_ptr);
    x1y0.val[1] = vld1q_f32(input_ptr + 4);
    float32x4x2_t x0y1;
    input_ptr += -input_x_offset + input_y_offset;
    x0y1.val[0] = vld1q_f32(input_ptr);
    x0y1.val[1] = vld1q_f32(input_ptr + 4);
    float32x4x2_t x1y1;
    input_ptr += input_x_offset;
    x1y1.val[0] = vld1q_f32(input_ptr);
    x1y1.val[1] = vld1q_f32(input_ptr + 4);
    // Top left corner.
    float* output_ptr = &output_data[Offset(output_shape, batch, y, x, ic)];
    vst1q_f32(output_ptr, x0y0.val[0]);
    vst1q_f32(output_ptr + 4, x0y0.val[1]);
    // Top right corner.
    output_ptr += output_x_offset;
    float32x4x2_t tr;
    tr.val[0] = vaddq_f32(x0y0.val[0], x1y0.val[0]);
    tr.val[1] = vaddq_f32(x0y0.val[1], x1y0.val[1]);
    tr.val[0] = vmulq_n_f32(tr.val[0], 0.5f);
    tr.val[1] = vmulq_n_f32(tr.val[1], 0.5f);
    vst1q_f32(output_ptr, tr.val[0]);
    vst1q_f32(output_ptr + 4, tr.val[1]);
    // Bottom left corner.
    output_ptr += -output_x_offset + output_y_offset;
    float32x4x2_t bl;
    bl.val[0] = vaddq_f32(x0y0.val[0], x0y1.val[0]);
    bl.val[1] = vaddq_f32(x0y0.val[1], x0y1.val[1]);
    bl.val[0] = vmulq_n_f32(bl.val[0], 0.5f);
    bl.val[1] = vmulq_n_f32(bl.val[1], 0.5f);
    vst1q_f32(output_ptr, bl.val[0]);
    vst1q_f32(output_ptr + 4, bl.val[1]);
    // Bottom right corner.
    output_ptr += output_x_offset;
    float32x4x2_t br;
    br.val[0] = vaddq_f32(x1y0.val[0], x1y1.val[0]);
    br.val[1] = vaddq_f32(x1y0.val[1], x1y1.val[1]);
    // br = bl + 0.5 * (x1y0 + x1y1); together with the final * 0.5 this
    // yields (bl + (x1y0 + x1y1) / 2) / 2, matching the scalar path below.
    br.val[0] = vmlaq_n_f32(bl.val[0], br.val[0], 0.5f);
    br.val[1] = vmlaq_n_f32(bl.val[1], br.val[1], 0.5f);
    br.val[0] = vmulq_n_f32(br.val[0], 0.5f);
    br.val[1] = vmulq_n_f32(br.val[1], 0.5f);
    vst1q_f32(output_ptr, br.val[0]);
    vst1q_f32(output_ptr + 4, br.val[1]);
  }
  // Handle 4 input channels at a time.
  for (; ic <= depth - 4; ic += 4) {
    const float* input_ptr =
        &input_data[Offset(input_shape, batch, y0, x0, ic)];
    float32x4_t x0y0 = vld1q_f32(input_ptr);
    float32x4_t x1y0 = vld1q_f32(input_ptr + input_x_offset);
    float32x4_t x0y1 = vld1q_f32(input_ptr + input_y_offset);
    float32x4_t x1y1 = vld1q_f32(input_ptr + input_x_offset + input_y_offset);
    // Top left corner.
    float* output_ptr = &output_data[Offset(output_shape, batch, y, x, ic)];
    vst1q_f32(output_ptr, x0y0);
    // Top right corner.
    output_ptr += output_x_offset;
    float32x4_t tr = vaddq_f32(x0y0, x1y0);
    tr = vmulq_n_f32(tr, 0.5f);
    vst1q_f32(output_ptr, tr);
    // Bottom left corner.
    output_ptr += -output_x_offset + output_y_offset;
    float32x4_t bl = vaddq_f32(x0y0, x0y1);
    bl = vmulq_n_f32(bl, 0.5f);
    vst1q_f32(output_ptr, bl);
    // Bottom right corner.
    output_ptr += output_x_offset;
    float32x4_t br = vaddq_f32(x1y0, x1y1);
    br = vmlaq_n_f32(bl, br, 0.5f);
    br = vmulq_n_f32(br, 0.5f);
    vst1q_f32(output_ptr, br);
  }
  // Handle one input channel at a time (scalar tail).
  for (; ic < depth; ic++) {
    const int32 input_offset = Offset(input_shape, batch, y0, x0, ic);
    float x0y0 = input_data[input_offset];
    float x1y0 = input_data[input_offset + input_x_offset];
    float x0y1 = input_data[input_offset + input_y_offset];
    float x1y1 = input_data[input_offset + input_x_offset + input_y_offset];
    // Top left corner.
    const int32 output_offset = Offset(output_shape, batch, y, x, ic);
    output_data[output_offset] = x0y0;
    // Top right corner.
    output_data[output_offset + output_x_offset] = (x0y0 + x1y0) / 2;
    // Bottom left corner.
    float output = (x0y0 + x0y1) / 2;
    output_data[output_offset + output_y_offset] = output;
    // Bottom right corner.
    output_data[output_offset + output_x_offset + output_y_offset] =
        (output + ((x1y0 + x1y1) / 2)) / 2;
  }
#else
  // Portable scalar implementation, one channel per iteration.
  for (int ch = 0; ch < depth; ch++) {
    const int32 input_offset = Offset(input_shape, batch, y0, x0, ch);
    float x0y0 = input_data[input_offset];
    float x1y0 = input_data[input_offset + input_x_offset];
    float x0y1 = input_data[input_offset + input_y_offset];
    float x1y1 = input_data[input_offset + input_x_offset + input_y_offset];
    // Top left corner.
    const int32 output_offset = Offset(output_shape, batch, y, x, ch);
    output_data[output_offset] = x0y0;
    // Top right corner.
    output_data[output_offset + output_x_offset] = (x0y0 + x1y0) / 2;
    // Bottom left corner.
    float output = (x0y0 + x0y1) / 2;
    output_data[output_offset + output_y_offset] = output;
    // Bottom right corner.
    output_data[output_offset + output_x_offset + output_y_offset] =
        (output + ((x1y0 + x1y1) / 2)) / 2;
  }
#endif
}
// Exact 2x bilinear upsample: every input pixel expands into a 2x2 block of
// output pixels, produced by ResizeBilinearKernel2x2. The right/bottom
// neighbor coordinates are clamped at the image border.
inline void ResizeBilinear2x2(int32 batches, int32 input_height,
                              int32 input_width, int32 depth,
                              int32 output_height, int32 output_width,
                              const RuntimeShape& input_shape,
                              const float* input_data,
                              const RuntimeShape& output_shape,
                              float* output_data) {
  for (int batch = 0; batch < batches; ++batch) {
    // Walk output coordinates in steps of 2; the source pixel is y/2, x/2.
    for (int y = 0; y + 2 <= output_height; y += 2) {
      const int32 y0 = y / 2;
      const int32 y1 = std::min(y0 + 1, input_height - 1);
      for (int x = 0; x + 2 <= output_width; x += 2) {
        const int32 x0 = x / 2;
        const int32 x1 = std::min(x0 + 1, input_width - 1);
        ResizeBilinearKernel2x2(x0, x1, y0, y1, x, y, depth, batch,
                                input_shape, input_data, output_shape,
                                output_data);
      }
    }
  }
}
// General float bilinear resize. For each output pixel it computes the four
// source coordinates/weights and accumulates the four taps across all
// channels via ResizeBilinearKernel, starting from a zeroed output buffer.
inline void ResizeBilinearGeneric(
    int32 batches, int32 input_height, int32 input_width, int32 depth,
    int32 output_height, int32 output_width, float height_scale,
    float width_scale, const RuntimeShape& input_shape, const float* input_data,
    const RuntimeShape& output_shape, float* output_data,
    const bool half_pixel_centers) {
  // The kernel accumulates, so the output must start at zero.
  memset(output_data, 0,
         batches * output_height * output_width * depth * sizeof(float));
  int32 output_offset = 0;
  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < output_height; ++y) {
      // input_y is the fractional source row; y0/y1 are the bracketing rows.
      float input_y;
      int32 y0, y1;
      reference_ops::ComputeInterpolationValues(
          y, height_scale, half_pixel_centers, input_height, &input_y, &y0,
          &y1);
      for (int x = 0; x < output_width; ++x) {
        float input_x;
        int32 x0, x1;
        reference_ops::ComputeInterpolationValues(
            x, width_scale, half_pixel_centers, input_width, &input_x, &x0,
            &x1);
        float* output_ptr = &output_data[output_offset];
        // Run kernel on the 4 corners of the bilinear resize algorithm.
        // Each scale is the bilinear weight (1 - dy or dy) * (1 - dx or dx).
        int32 input_offset = Offset(input_shape, b, y0, x0, 0);
        float scale = (1 - (input_y - y0)) * (1 - (input_x - x0));
        const float* input_ptr = &input_data[input_offset];
        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
        input_offset = Offset(input_shape, b, y0, x1, 0);
        scale = (1 - (input_y - y0)) * (input_x - x0);
        input_ptr = &input_data[input_offset];
        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
        input_offset = Offset(input_shape, b, y1, x0, 0);
        scale = (input_y - y0) * (1 - (input_x - x0));
        input_ptr = &input_data[input_offset];
        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
        input_offset = Offset(input_shape, b, y1, x1, 0);
        scale = (input_y - y0) * (input_x - x0);
        input_ptr = &input_data[input_offset];
        ResizeBilinearKernel(input_ptr, depth, scale, output_ptr);
        output_offset += depth;
      }
    }
  }
}
// Bilinear resize specialized for small channel counts: instead of running a
// per-tap kernel over all channels, it gathers the four corner values per
// channel and blends them in one expression. Works for integer T (e.g.
// uint8) as well as float.
template <typename T>
inline void ResizeBilinearGenericSmallChannel(
    int32 batches, int32 input_height, int32 input_width, int32 depth,
    int32 output_height, int32 output_width, float height_scale,
    float width_scale, const RuntimeShape& input_shape, const T* input_data,
    const RuntimeShape& output_shape, T* output_data,
    const bool half_pixel_centers) {
  T* output_ptr = &output_data[0];
  // Integer outputs get +0.5 before truncation, i.e. round-to-nearest.
  const float rounding_offset = std::numeric_limits<T>::is_integer ? .5f : .0f;
  for (int b = 0; b < batches; ++b) {
    for (int y = 0; y < output_height; ++y) {
      // Fractional source row and its two bracketing integer rows.
      float input_y;
      int32 y0, y1;
      reference_ops::ComputeInterpolationValues(
          y, height_scale, half_pixel_centers, input_height, &input_y, &y0,
          &y1);
      for (int x = 0; x < output_width; ++x) {
        float input_x;
        int32 x0, x1;
        reference_ops::ComputeInterpolationValues(
            x, width_scale, half_pixel_centers, input_width, &input_x, &x0,
            &x1);
        // Flat offsets of the four corners (channel 0).
        int32 input_offset[4] = {Offset(input_shape, b, y0, x0, 0),
                                 Offset(input_shape, b, y0, x1, 0),
                                 Offset(input_shape, b, y1, x0, 0),
                                 Offset(input_shape, b, y1, x1, 0)};
        // Bilinear weights for the four corners; they sum to 1.
        float scale[4] = {(1 - (input_y - y0)) * (1 - (input_x - x0)),
                          (1 - (input_y - y0)) * (input_x - x0),
                          (input_y - y0) * (1 - (input_x - x0)),
                          (input_y - y0) * (input_x - x0)};
        for (int d = 0; d < depth; d++) {
          const T* input_ptr = &input_data[d];
          *output_ptr++ = static_cast<T>(input_ptr[input_offset[0]] * scale[0] +
                                         input_ptr[input_offset[1]] * scale[1] +
                                         input_ptr[input_offset[2]] * scale[2] +
                                         input_ptr[input_offset[3]] * scale[3] +
                                         rounding_offset);
        }
      }
    }
  }
}
// Float bilinear resize entry point. Dispatches to the specialized 2x
// upsample kernel when applicable, otherwise to the generic implementation.
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
                           const RuntimeShape& unextended_input_shape,
                           const float* input_data,
                           const RuntimeShape& output_size_shape,
                           const int32* output_size_data,
                           const RuntimeShape& unextended_output_shape,
                           float* output_data) {
  ruy::profiler::ScopeLabel label("ResizeBilinear");
  // If half_pixel_centers is True, align_corners must be False.
  TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int32 input_height = input_shape.Dims(1);
  const int32 input_width = input_shape.Dims(2);
  const int32 depth = MatchingDim(input_shape, 3, output_shape, 3);
  TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2);
  const int32 output_height = output_size_data[0];
  const int32 output_width = output_size_data[1];
  // Specialize for 2x2 upsample (no corner alignment, no half-pixel centers).
  const bool is_2x_upsample = !op_params.align_corners &&
                              !op_params.half_pixel_centers &&
                              output_height == 2 * input_height &&
                              output_width == 2 * input_width;
  if (is_2x_upsample) {
    ResizeBilinear2x2(batches, input_height, input_width, depth, output_height,
                      output_width, input_shape, input_data, output_shape,
                      output_data);
    return;
  }
  // align_corners maps endpoints exactly, so the scale uses (size - 1).
  const float height_scale =
      (op_params.align_corners && output_height > 1)
          ? static_cast<float>(input_height - 1) / (output_height - 1)
          : static_cast<float>(input_height) / output_height;
  const float width_scale =
      (op_params.align_corners && output_width > 1)
          ? static_cast<float>(input_width - 1) / (output_width - 1)
          : static_cast<float>(input_width) / output_width;
  ResizeBilinearGeneric(batches, input_height, input_width, depth,
                        output_height, output_width, height_scale, width_scale,
                        input_shape, input_data, output_shape, output_data,
                        op_params.half_pixel_centers);
}
// Note: This is not a universal quantized bilinear. It does not use int8
// or int16 arithmetic.
// uint8 bilinear resize entry point. Uses the specialized 8x upsample kernel
// when its preconditions hold, otherwise the small-channel generic path.
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
                           const RuntimeShape& unextended_input_shape,
                           const uint8* input_data,
                           const RuntimeShape& output_size_shape,
                           const int32* output_size_data,
                           const RuntimeShape& unextended_output_shape,
                           uint8* output_data) {
  ruy::profiler::ScopeLabel label("ResizeBilinearUint8");
  // If half_pixel_centers is True, align_corners must be False.
  TFLITE_DCHECK(!op_params.half_pixel_centers || !op_params.align_corners);
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int32 batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int32 input_height = input_shape.Dims(1);
  const int32 input_width = input_shape.Dims(2);
  const int32 depth = MatchingDim(input_shape, 3, output_shape, 3);
  TFLITE_DCHECK_EQ(output_size_shape.FlatSize(), 2);
  const int32 output_height = output_size_data[0];
  const int32 output_width = output_size_data[1];
  // Fast path: exact 8x upsample with half-pixel centers and depth a multiple
  // of 8. The >= 8 size restriction keeps the unrolled kernel code small.
  if (!op_params.align_corners && op_params.half_pixel_centers &&
      (depth % 8) == 0 && output_height >= 8 && output_width >= 8) {
    const int32 scale = output_height / input_height;
    if (scale == 8 && (input_height * scale) == output_height &&
        (input_width * scale) == output_width) {
      resize_bilinear::ResizeBilinear888Uint8(batches, input_height,
                                              input_width, depth, input_data,
                                              output_data);
      return;
    }
  }
  // align_corners maps endpoints exactly, so the scale uses (size - 1).
  float height_scale = static_cast<float>(input_height) / output_height;
  if (op_params.align_corners && output_height > 1) {
    height_scale = static_cast<float>(input_height - 1) / (output_height - 1);
  }
  float width_scale = static_cast<float>(input_width) / output_width;
  if (op_params.align_corners && output_width > 1) {
    width_scale = static_cast<float>(input_width - 1) / (output_width - 1);
  }
  ResizeBilinearGenericSmallChannel<uint8>(
      batches, input_height, input_width, depth, output_height, output_width,
      height_scale, width_scale, input_shape, input_data, output_shape,
      output_data, op_params.half_pixel_centers);
}
// TODO(b/180609127) Create optimized int8 version from uint8. Call from here.
// int8 bilinear resize: no optimized implementation yet; forwards to the
// integer reference kernel (see the TODO above).
inline void ResizeBilinear(const tflite::ResizeBilinearParams& op_params,
                           const RuntimeShape& unextended_input_shape,
                           const int8* input_data,
                           const RuntimeShape& unextended_output_size_shape,
                           const int32* output_size_data,
                           const RuntimeShape& unextended_output_shape,
                           int8* output_data) {
  reference_ops::ResizeBilinearInteger(op_params, unextended_input_shape,
                                       input_data, unextended_output_size_shape,
                                       output_size_data,
                                       unextended_output_shape, output_data);
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_RESIZE_BILINEAR_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/resize_bilinear.h | C++ | apache-2.0 | 70,083 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SPARSE_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SPARSE_OPS_FULLY_CONNECTED_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/cpu_backend_threadpool.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/tensor_utils.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace optimized_ops {
// Fully connected layer with randomly sparse float weights stored in a
// CSR-like layout: dim_metadata[1].array_segments delimits each output row's
// run of nonzeros in weights_data, and array_indices holds their columns.
// Computes output = activation(sparse_weights * input + bias).
inline void FullyConnectedSparseWeight(
    const TfLiteSparsity& sparsity, const FullyConnectedParams& params,
    const RuntimeShape& input_shape, const float* input_data,
    const RuntimeShape& weights_shape, const float* weights_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data) {
  ruy::profiler::ScopeLabel label("FullyConnected");
  ruy::profiler::ScopeLabel inner_label("Random Sparse");
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  const int output_elements = output_shape.FlatSize();
  const int output_dims_count = output_shape.DimensionsCount();
  const int weights_dims_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dims_count - 1);
  const int w0_size = sparsity.dim_metadata[0].dense_size;
  const int* w1_segments = sparsity.dim_metadata[1].array_segments->data;
  const int* w1_indices = sparsity.dim_metadata[1].array_indices->data;
  // Start from a zeroed output, then scatter-accumulate the nonzero weights.
  memset(output_data, 0, output_elements * sizeof(float));
  for (int b = 0; b < batches; ++b) {
    const float* input_row = input_data + b * accum_depth;
    float* output_row = output_data + b * output_depth;
    for (int row = 0; row < w0_size; ++row) {
      // weights_data[p] pairs with input column w1_indices[p].
      for (int p = w1_segments[row]; p < w1_segments[row + 1]; ++p) {
        output_row[row] += weights_data[p] * input_row[w1_indices[p]];
      }
    }
  }
  // Add bias and apply the fused activation clamp.
  for (int b = 0; b < batches; ++b) {
    float* output_row = output_data + b * output_depth;
    for (int i = 0; i < output_depth; ++i) {
      output_row[i] = ActivationFunctionWithMinMax(output_row[i] + bias_data[i],
                                                   output_activation_min,
                                                   output_activation_max);
    }
  }
}
// 1x4 block-sparse fully connected over one slice of the batch dimension,
// batches [thread_start, thread_end). The matrix-vector product is delegated
// to tensor_utils, then bias and the fused activation clamp are applied.
// Assumes the output slice was zero-initialized by the caller (see
// FullyConnectedSparseWeight1x4), since the kernel accumulates into it.
// NOTE(review): `cpu_backend_context` is not used here; it is threaded
// through for interface uniformity with the task-based multithreaded path.
inline void FullyConnectedSparseWeight1x4Impl(
    const TfLiteSparsity& sparsity, const FullyConnectedParams& params,
    const RuntimeShape& input_shape, const float* input_data,
    const RuntimeShape& weights_shape, const float* weights_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data, int thread_start,
    int thread_end, const CpuBackendContext& cpu_backend_context) {
  ruy::profiler::ScopeLabel label("FullyConnected");
  ruy::profiler::ScopeLabel inner_label("1x4 Block Sparse");
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  const int input_dims_count = input_shape.DimensionsCount();
  const int output_dims_count = output_shape.DimensionsCount();
  const int weights_dims_count = weights_shape.DimensionsCount();
  const int batches = thread_end - thread_start;
  const int input_depth = MatchingDim(weights_shape, weights_dims_count - 1,
                                      input_shape, input_dims_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  const int* w1_segments = sparsity.dim_metadata[1].array_segments->data;
  const int* w1_indices = sparsity.dim_metadata[1].array_indices->data;
  // Sparse matmul for this batch slice only; pointers are offset so the task
  // touches just its own rows.
  tensor_utils::SparseMatrixBatchVectorMultiplyAccumulate1x4(
      weights_data, w1_segments, w1_indices, weights_shape.Dims(0),
      weights_shape.Dims(1), input_data + thread_start * input_depth, batches,
      output_data + thread_start * output_depth);
  ruy::profiler::ScopeLabel activation_label("activation function");
  // Add bias and clamp with the fused activation.
  for (int b = thread_start; b < thread_end; ++b) {
    for (int i = 0; i < output_depth; ++i) {
      float total = output_data[b * output_depth + i];
      float bias_value = bias_data[i];
      output_data[b * output_depth + i] = ActivationFunctionWithMinMax(
          total + bias_value, output_activation_min, output_activation_max);
    }
  }
}
// Threadpool task that runs FullyConnectedSparseWeight1x4Impl over the batch
// slice [thread_start, thread_end).
// NOTE(review): members hold references into the caller's arguments, so every
// argument must outlive the task's execution — true for the synchronous
// Execute() call in FullyConnectedSparseWeight1x4.
struct FullyConnectedSparseWeight1x4Task : cpu_backend_threadpool::Task {
  FullyConnectedSparseWeight1x4Task(
      const TfLiteSparsity& sparsity, const FullyConnectedParams& params,
      const RuntimeShape& input_shape, const float* input_data,
      const RuntimeShape& weights_shape, const float* weights_data,
      const RuntimeShape& bias_shape, const float* bias_data,
      const RuntimeShape& output_shape, float* output_data, int thread_start,
      int thread_end, const CpuBackendContext& cpu_backend_context_x)
      : sparsity(sparsity),
        params(params),
        input_shape(input_shape),
        input_data(input_data),
        weights_shape(weights_shape),
        weights_data(weights_data),
        bias_shape(bias_shape),
        bias_data(bias_data),
        output_shape(output_shape),
        output_data(output_data),
        thread_start(thread_start),
        thread_end(thread_end),
        cpu_backend_context(cpu_backend_context_x) {}
  // Invoked by the threadpool; forwards to the single-threaded impl.
  void Run() override {
    FullyConnectedSparseWeight1x4Impl(
        sparsity, params, input_shape, input_data, weights_shape, weights_data,
        bias_shape, bias_data, output_shape, output_data, thread_start,
        thread_end, cpu_backend_context);
  }
 private:
  const TfLiteSparsity& sparsity;
  const FullyConnectedParams& params;
  const RuntimeShape& input_shape;
  const float* input_data;
  const RuntimeShape& weights_shape;
  const float* weights_data;
  const RuntimeShape& bias_shape;
  const float* bias_data;
  const RuntimeShape& output_shape;
  float* output_data;
  int thread_start;
  int thread_end;
  const CpuBackendContext& cpu_backend_context;
};
// The multi-threaded kernel slices the workload along the batch dimension. If
// there's not enough batches of data, the number of threads used is equal to
// the batch size. We can improve this later with slicing along the row
// dimension of the weight.
// Multi-threaded 1x4 block-sparse fully connected. The workload is sliced
// along the batch dimension; with fewer batches than threads, one task per
// batch is used. (Slicing along weight rows could improve this later.)
inline void FullyConnectedSparseWeight1x4(
    const TfLiteSparsity& sparsity, const FullyConnectedParams& params,
    const RuntimeShape& input_shape, const float* input_data,
    const RuntimeShape& weights_shape, const float* weights_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    CpuBackendContext* cpu_backend_context) {
  // The sparse kernel accumulates, so the output buffer must start zeroed.
  memset(output_data, 0, output_shape.FlatSize() * sizeof(float));
  const int batches =
      FlatSizeSkipDim(output_shape, output_shape.DimensionsCount() - 1);
  const int max_threads = cpu_backend_context->max_num_threads();
  const int thread_count = std::max(1, std::min(batches, max_threads));
  if (thread_count == 1) {
    // Single-threaded: run inline over the whole batch range.
    return FullyConnectedSparseWeight1x4Impl(
        sparsity, params, input_shape, input_data, weights_shape, weights_data,
        bias_shape, bias_data, output_shape, output_data, 0, batches,
        *cpu_backend_context);
  }
  // Balance the load when batches is not a multiple of thread_count: the
  // first (batches % thread_count) tasks take one extra batch.
  const int base_slice = batches / thread_count;
  const int extra_tasks = batches % thread_count;
  std::vector<FullyConnectedSparseWeight1x4Task> tasks;
  tasks.reserve(thread_count);
  int slice_start = 0;
  for (int i = 0; i < thread_count; ++i) {
    const int slice_end = slice_start + base_slice + (i < extra_tasks ? 1 : 0);
    tasks.emplace_back(sparsity, params, input_shape, input_data, weights_shape,
                       weights_data, bias_shape, bias_data, output_shape,
                       output_data, slice_start, slice_end,
                       *cpu_backend_context);
    slice_start = slice_end;
  }
  cpu_backend_threadpool::Execute(tasks.size(), tasks.data(),
                                  cpu_backend_context);
}
} // namespace optimized_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SPARSE_OPS_FULLY_CONNECTED_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/sparse_ops/fully_connected.h | C++ | apache-2.0 | 9,404 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_CHECK_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_CHECK_H_
#if defined(__SSSE3__)
// SSSE3 available: use the SSE implementations.
#define SSE_OR_PORTABLE(funcname, ...) Sse##funcname(__VA_ARGS__)
#else
// No SSSE3 available: fall back to the portable implementations.
#define SSE_OR_PORTABLE(funcname, ...) Portable##funcname(__VA_ARGS__)
#endif
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_CHECK_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/sse_check.h | C | apache-2.0 | 1,122 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/optimized/sse_tensor_utils_impl.h"
#ifdef __SSSE3__
#include <emmintrin.h> // SSE2
#include <tmmintrin.h> // SSSE3
#ifdef __SSE4_1__
#include <smmintrin.h> // SSE4.1
#endif
#include <cstdint>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm.h"
#include "tensorflow/lite/kernels/cpu_backend_gemm_params.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace tensor_utils {
namespace {
// Dot product of four int8 vectors of 4 elements packed into a XMM register.
// Result is four int32 scalars packed into a XMM register.
// int8x4x4 · int8x4x4 => int32x4
static inline __m128i DotProdInt8x4x4(__m128i a_8x16, __m128i b_8x16) {
  // Transfer sign from 'a' to 'b', as _mm_maddubs_epi16 treats 'a' unsigned.
  // After this, a*b is unchanged pairwise: |a| * sign(a)*b == a * b.
  b_8x16 = _mm_sign_epi8(b_8x16, a_8x16);
  a_8x16 = _mm_abs_epi8(a_8x16);
  // sumprod[i] = a[2*i]*b[2*i] + a[2*i+1]*b[2*i+1] (i = 0..7)
  __m128i sumprod_16x8 = _mm_maddubs_epi16(a_8x16, b_8x16);
  // sumprod[i] = sumprod[2*i]*1 + sumprod[2*i+1]*1 (i = 0..3)
  // Widens the 16-bit pair sums to the four 32-bit dot products.
  return _mm_madd_epi16(sumprod_16x8, _mm_set1_epi16(1));
}
// Horizontally add 4 int32 values stored in a single XMM register to int32_t.
// Returns acc[0] + acc[1] + acc[2] + acc[3] via two shuffle/add steps.
static inline int32_t ReduceInt32x4(__m128i acc) {
  // Shuffle to contain high half of acc (both in high and low halfs).
  __m128i shuffle = _mm_unpackhi_epi64(acc, acc);
  // Add shuffle and acc; low half is sums of twos (high half is ignored).
  acc = _mm_add_epi32(acc, shuffle);
  // Shuffle the two elements in low half (ignore high half).
  shuffle = _mm_shuffle_epi32(acc, _MM_SHUFFLE(2, 3, 0, 1));
  // Add shuffle and acc; lowest element is sum of all 4 input.
  acc = _mm_add_epi32(acc, shuffle);
  // Return lowest element as int32_t.
  return _mm_cvtsi128_si32(acc);
}
// Horizontally add each of 4 XMM registers with 4 int32 values, pack result
// into a single XMM register. Similar to ReduceInt32x4, but with 4x inputs.
// Result lane k is the horizontal sum of the k-th input register, i.e.
// {sum(a), sum(b), sum(c), sum(d)}.
static inline __m128i ReduceInt32x4x4(__m128i a, __m128i b, __m128i c,
                                      __m128i d) {
  // Assuming x = [x0, x1, x2, x3]
  const __m128i a_b_lo_half = _mm_unpacklo_epi32(a, b);  // [a0, b0, a1, b1]
  const __m128i a_b_hi_half = _mm_unpackhi_epi32(a, b);  // [a2, b2, a3, b3]
  const __m128i a_plus_b =
      _mm_add_epi32(a_b_lo_half, a_b_hi_half);  // [a0+a2, b0+b2, a1+a3, b1+b3]
  const __m128i c_d_lo_half = _mm_unpacklo_epi32(c, d);  // [c0, d0, c1, d1]
  const __m128i c_d_hi_half = _mm_unpackhi_epi32(c, d);  // [c2, d2, c3, d3]
  const __m128i c_plus_d =
      _mm_add_epi32(c_d_lo_half, c_d_hi_half);  // [c0+c2, d0+d2, c1+c3, d1+d3]
  const __m128i all_evns =
      _mm_unpacklo_epi64(a_plus_b, c_plus_d);  // [a02, b02, c02, d02]
  const __m128i all_odds =
      _mm_unpackhi_epi64(a_plus_b, c_plus_d);  // [a13, b13, c13, d13]
  return _mm_add_epi32(all_evns, all_odds);  // [a0123, b0123, c0123, d0123]
}
// Returns the ith element of a XMM register holding float numbers.
// Returns the ith float lane of `v` (compile-time index, 0 <= i < 4).
template <int i>
float GetFloatVectorElement(__m128 v) {
  static_assert(i >= 0 && i < 4, "The index must be 0 <= i < 4.");
  // Note, _mm_extract_ps returns int, so we can't use it here.
  // These lines will be optimized to extractps anyway.
  // Broadcast lane i to all lanes, then read lane 0.
  v = _mm_shuffle_ps(v, v, _MM_SHUFFLE(i, i, i, i));
  return _mm_cvtss_f32(v);
}
} // namespace
// Shared implementation of the hybrid (int8 weights, float scales)
// matrix * batch-of-vectors product:
//   result[batch * m_rows + row] +=
//       row_scale * (matrix[row] . vectors[batch] - row_offset)
// where
//  - matrix: row-major int8, m_rows x m_cols;
//  - vectors: n_batch contiguous int8 vectors of m_cols each;
//  - scaling_factors: one float scale per batch;
//  - per_channel_scale: optional (may be null) extra per-row scale;
//  - input_offset: optional (may be null) per-batch input zero point;
//  - row_sums: optional precomputed sum of each matrix row, used to subtract
//    the zero point's contribution (batch_offset * sum(row)).
// The main loop consumes 16 int8 values per iteration with SSE; postambles
// handle the remaining 8 / 4 / <4 columns.
void SseMatrixBatchVectorMultiplyAccumulateImpl(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, const int32_t* row_sums) {
  for (std::intptr_t batch = 0; batch < n_batch; ++batch) {
    const float batch_scaling_factor = scaling_factors[batch];
    // Input zero point for this batch; 0 when quantization is symmetric.
    const int32_t batch_offset = input_offset ? input_offset[batch] : 0;
    // Compute dot-product for every column.
    for (std::intptr_t row = 0; row < m_rows; ++row) {
      // Get the address of the first element of the row.
      const int8_t* __restrict__ row_ptr = matrix + row * m_cols;
      // Effective scale: per-row scale folded into the batch scale if given.
      const float row_scale =
          per_channel_scale ? per_channel_scale[row] * batch_scaling_factor
                            : batch_scaling_factor;
      // Correction removing the input zero point's contribution from the raw
      // dot product; applied only when both row sums and an offset exist.
      const int32_t row_offset =
          row_sums && batch_offset ? batch_offset * row_sums[row] : 0;
      // Initialize the dot product sum for the row to 0.
      __m128i dotprod_32x4 = _mm_setzero_si128();
      std::intptr_t col = 0;
      // For every block of 16x 8-bit inputs.
      while (col < (m_cols & ~15)) {
        const __m128i vec_8x16 =
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(vectors + col));
        const __m128i row_8x16 =
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(row_ptr + col));
        // dotprod += vec · row
        dotprod_32x4 =
            _mm_add_epi32(dotprod_32x4, DotProdInt8x4x4(vec_8x16, row_8x16));
        col += 16;
      }
#ifdef __SSE4_1__
      // Postamble for 8x 8-bit inputs.
      if (col < (m_cols & ~7)) {
        // Widen int8 -> int16, then multiply-add pairs into int32 lanes.
        const __m128i vec_16x8 = _mm_cvtepi8_epi16(
            _mm_loadl_epi64(reinterpret_cast<const __m128i*>(vectors + col)));
        const __m128i row_16x8 = _mm_cvtepi8_epi16(
            _mm_loadl_epi64(reinterpret_cast<const __m128i*>(row_ptr + col)));
        // dotprod += vec · row
        dotprod_32x4 =
            _mm_add_epi32(dotprod_32x4, _mm_madd_epi16(vec_16x8, row_16x8));
        col += 8;
      }
      // Postamble for 4x 8-bit inputs.
      if (col < (m_cols & ~3)) {
        // Widen int8 -> int32 and multiply lane-wise.
        const __m128i vec_32x4 = _mm_cvtepi8_epi32(
            _mm_loadl_epi64(reinterpret_cast<const __m128i*>(vectors + col)));
        const __m128i row_32x4 = _mm_cvtepi8_epi32(
            _mm_loadl_epi64(reinterpret_cast<const __m128i*>(row_ptr + col)));
        // dotprod += vec · row
        dotprod_32x4 =
            _mm_add_epi32(dotprod_32x4, _mm_mullo_epi32(vec_32x4, row_32x4));
        col += 4;
      }
#endif
      // Horizontally add the 4 intermediate sum values to get the final
      // dot-prod value for this row.
      int32_t sum = ReduceInt32x4(dotprod_32x4);
#if defined(__SSE4_1__) && defined(__clang__)
      // SSE 4.1: Don't try to unroll and vectorize this, already done above.
#pragma clang loop unroll(disable) vectorize(disable)
#endif
      // Postamble loop for <4x (<16x without SSE 4.1) remaining 8-bit inputs.
      for (; col < m_cols; ++col) {
        sum += row_ptr[col] * vectors[col];
      }  // for col
      // Remove the zero-point contribution, then scale and accumulate.
      if (row_offset) {
        sum -= row_offset;
      }
      *result += sum * row_scale;
      ++result;
    }  // for row
    // Advance to the next batch's input; `result` was advanced per row above.
    vectors += m_cols;
  }  // for batch
}
// Dispatches the int8 GEMM  scratch = input_to_gate_weights x input (+ bias)
// through cpu_backend_gemm, leaving raw int32 accumulators in `scratch`.
//  - input_to_gate_weights: row-major LHS, n_output x n_input;
//  - input: RHS holding n_batch column vectors of length n_input;
//  - scratch: destination, n_output x n_batch, column-major;
//  - bias: optional (may be null) per-output int32 bias added by the GEMM.
// NOTE(review): `output_zp` is accepted but never referenced in this body;
// presumably the zero-point handling happens in the caller — confirm.
void SseCpuBackendGemm(const int8_t* input, const int32_t* bias,
                       const int8_t* input_to_gate_weights, int32_t n_batch,
                       int32_t n_input, int32_t n_output, int32_t output_zp,
                       int32_t* scratch, CpuBackendContext* context) {
  using ::tflite::cpu_backend_gemm::Gemm;
  using ::tflite::cpu_backend_gemm::GemmParams;
  using ::tflite::cpu_backend_gemm::MatrixParams;
  MatrixParams<int8_t> lhs_params;
  lhs_params.order = cpu_backend_gemm::Order::kRowMajor;
  lhs_params.rows = n_output;
  lhs_params.cols = n_input;
  // Let the backend cache the packed weight matrix when re-packing would
  // dominate the cost of repeated calls.
  lhs_params.cache_policy = cpu_backend_gemm::CachePolicy::kCacheIfLargeSpeedup;
  MatrixParams<int8_t> rhs_params;
  rhs_params.order = cpu_backend_gemm::Order::kColMajor;
  rhs_params.rows = n_input;
  rhs_params.cols = n_batch;
  MatrixParams<int32_t> dst_params;
  dst_params.order = cpu_backend_gemm::Order::kColMajor;
  dst_params.rows = n_output;
  dst_params.cols = n_batch;
  // NOTE(review): `int32` here vs `int32_t` elsewhere — presumably the tflite
  // alias for the same type; consider unifying the spelling.
  GemmParams<int32, int32> gemm_params;
  if (bias) {
    gemm_params.bias = bias;
  }
  cpu_backend_gemm::Gemm(lhs_params, input_to_gate_weights, rhs_params, input,
                         dst_params, scratch, gemm_params, context);
}
// Symmetric-quantization entry point: no per-channel scales and no input
// offsets are involved, so no row-sum correction is needed either. Forward to
// the shared implementation with all of those features disabled.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result) {
  const float* no_per_channel_scale = nullptr;
  const int32_t* no_input_offset = nullptr;
  const int32_t* no_row_sums = nullptr;
  SseMatrixBatchVectorMultiplyAccumulateImpl(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      no_per_channel_scale, no_input_offset, no_row_sums);
}
// GEMM-accelerated symmetric-quantization path. When the row count permits
// (multiple of 4) and the backend prefers it, runs an int8 GEMM into
// `scratch` and then scales the int32 accumulators by the per-batch float
// scaling factors into `result`; otherwise falls back to the direct SSE
// implementation.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch,
    float* __restrict__ result, CpuBackendContext* context) {
  // TODO(b/183178387): Use a proper query to detect AVX/optimized paths.
  if (m_rows % 4 == 0 && !context->PreferGemmlowpOnX86()) {
    const int32_t* bias = static_cast<const int32_t*>(nullptr);
    SseCpuBackendGemm(vectors, bias, matrix, n_batch, m_cols, m_rows,
                      /*output_zp=*/0, scratch, context);
    {
      ruy::profiler::ScopeLabel label("HybridMultiplyScalingFactor");
      // Multiply by float scaling factors and write to result.
      // Each group of 4 scratch values belongs to one batch (m_rows % 4 == 0),
      // so a single broadcast scale per group of 4 is correct.
      const int total_size = n_batch * m_rows;
      int i = 0;
      for (; i <= total_size - 8; i += 8, result += 8) {
        const float batch_scaling_factor0 = scaling_factors[i / m_rows];
        const float batch_scaling_factor1 = scaling_factors[(i + 4) / m_rows];
        const __m128 scaling_factor0 = _mm_set1_ps(batch_scaling_factor0);
        const __m128 scaling_factor1 = _mm_set1_ps(batch_scaling_factor1);
        const __m128i scratch_val0 =
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(scratch + i));
        const __m128i scratch_val1 =
            _mm_loadu_si128(reinterpret_cast<const __m128i*>(scratch + i + 4));
        const __m128 float_val0 = _mm_cvtepi32_ps(scratch_val0);
        const __m128 float_val1 = _mm_cvtepi32_ps(scratch_val1);
        const __m128 prod0 = _mm_mul_ps(float_val0, scaling_factor0);
        // BUG FIX: accumulate element-wise into the existing result values.
        // _mm_load1_ps broadcast result[0] / result[4] across all four lanes,
        // corrupting lanes 1-3 of the accumulation; _mm_loadu_ps performs the
        // intended 4-float load. Likewise use unaligned stores: `result` has
        // no 16-byte alignment guarantee, so _mm_store_ps could fault.
        const __m128 result0 = _mm_add_ps(_mm_loadu_ps(result), prod0);
        const __m128 prod1 = _mm_mul_ps(float_val1, scaling_factor1);
        const __m128 result1 = _mm_add_ps(_mm_loadu_ps(result + 4), prod1);
        _mm_storeu_ps(result, result0);
        _mm_storeu_ps(result + 4, result1);
      }
      // Scalar postamble for the remaining (< 8) values.
      scratch += i;
      for (; i < total_size; i++) {
        const float batch_scaling_factor = scaling_factors[i / m_rows];
        int32_t x = *(scratch++);
        *result += x * batch_scaling_factor;
        ++result;
      }
    }
    return;
  }
  SseMatrixBatchVectorMultiplyAccumulateImpl(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      /*per_channel_scale=*/nullptr, /*input_offset=*/nullptr,
      /*row_sums=*/nullptr);
}
// Asymmetric-quantization path. When input offsets are supplied, the matrix
// row sums are needed to cancel the offsets' contribution to the dot
// products; recompute them only when the caller has not marked them cached.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
  if (input_offset != nullptr) {
    const bool row_sums_stale =
        (compute_row_sums == nullptr) || *compute_row_sums;
    if (row_sums_stale) {
      SseReductionSumVector(matrix, row_sums, m_rows, m_cols);
      if (compute_row_sums != nullptr) {
        *compute_row_sums = false;
      }
    }
  }
  SseMatrixBatchVectorMultiplyAccumulateImpl(
      matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result,
      per_channel_scale, input_offset, row_sums);
}
namespace {
// Implements sparse-matrix - vector multiply-accumulate.
// The matrix is stored block-sparse: only non-zero 16-byte column blocks are
// kept, back to back, in `matrix`. `ledger` describes them: for each row, one
// byte holding the number of non-zero blocks followed by that many block
// indices (block index * 16 = starting column within the dense row).
// Computes result[row] += scaling_factor * (matrix[row] . vector).
inline void SseSparseMatrixVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    const int m_rows, const int m_cols, const int8_t* __restrict__ vector,
    const float scaling_factor, float* __restrict__ result) {
  static const std::intptr_t kBlockSize = 16;
  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
  const uint8_t* __restrict__ ledger_ptr = ledger;
  for (std::intptr_t row = 0; row < m_rows; ++row) {
    // Initialize the dot product sum for the row to 0.
    __m128i dotprod_32x4 = _mm_setzero_si128();
    std::intptr_t num_nonzero_blocks = *ledger_ptr++;
    for (std::intptr_t i = 0; i < num_nonzero_blocks; i++) {
      // Dense column position of this non-zero block.
      const std::intptr_t col_index = *ledger_ptr++ * kBlockSize;
      const __m128i vec_8x16 =
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(vector + col_index));
      const __m128i row_8x16 =
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(matrix));
      // dotprod += vec · row
      dotprod_32x4 =
          _mm_add_epi32(dotprod_32x4, DotProdInt8x4x4(vec_8x16, row_8x16));
      // `matrix` stores only the non-zero blocks, so it advances linearly.
      matrix += kBlockSize;
    }  // for col
    // Horizontally add the 4 intermediate sum values to get the final
    // dot-prod value for this row.
    int32_t dotprod = ReduceInt32x4(dotprod_32x4);
    result[row] += dotprod * scaling_factor;
  }  // for row
}
// Implements sparse-matrix - batch-of-4-vectors multiply-accumulate.
// The stride between vectors and results must be equal to m_cols / m_rows
// respectively. One pass over the sparse matrix and its ledger is shared
// across four batches: every non-zero 16-byte block is loaded once and dotted
// with all four input vectors. `scaling_factors_fx4` holds the four per-batch
// scales in lane order (lane k scales batch k).
inline void SseSparseMatrix4VectorsMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    const int m_rows, const int m_cols,
    const int8_t* __restrict__ const vectors, const __m128 scaling_factors_fx4,
    float* __restrict__ const results) {
  static const std::intptr_t kBlockSize = 16;
  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
  // Per-batch input vectors and result columns, laid out back to back.
  const int8_t* __restrict__ vector0 = vectors + 0 * m_cols;
  const int8_t* __restrict__ vector1 = vectors + 1 * m_cols;
  const int8_t* __restrict__ vector2 = vectors + 2 * m_cols;
  const int8_t* __restrict__ vector3 = vectors + 3 * m_cols;
  float* __restrict__ result0 = results + 0 * m_rows;
  float* __restrict__ result1 = results + 1 * m_rows;
  float* __restrict__ result2 = results + 2 * m_rows;
  float* __restrict__ result3 = results + 3 * m_rows;
  for (std::intptr_t row = 0; row < m_rows; ++row) {
    // Initialize the dot product sum for the row to 0.
    __m128i dp0_32x4 = _mm_setzero_si128();
    __m128i dp1_32x4 = _mm_setzero_si128();
    __m128i dp2_32x4 = _mm_setzero_si128();
    __m128i dp3_32x4 = _mm_setzero_si128();
    std::intptr_t num_nonzero_blocks = *ledger++;
    for (std::intptr_t i = 0; i < num_nonzero_blocks; i++) {
      // Dense column position of this non-zero block.
      const std::intptr_t col_index = *ledger++ * kBlockSize;
      // vecN are for different batches
      const __m128i vec0_8x16 = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(vector0 + col_index));
      const __m128i vec1_8x16 = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(vector1 + col_index));
      const __m128i vec2_8x16 = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(vector2 + col_index));
      const __m128i vec3_8x16 = _mm_loadu_si128(
          reinterpret_cast<const __m128i*>(vector3 + col_index));
      // The weight block is loaded once and reused for all four batches.
      const __m128i row_8x16 =
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(matrix));
      // dp += vec · row
      // dpN are for different batches
      dp0_32x4 = _mm_add_epi32(dp0_32x4, DotProdInt8x4x4(row_8x16, vec0_8x16));
      dp1_32x4 = _mm_add_epi32(dp1_32x4, DotProdInt8x4x4(row_8x16, vec1_8x16));
      dp2_32x4 = _mm_add_epi32(dp2_32x4, DotProdInt8x4x4(row_8x16, vec2_8x16));
      dp3_32x4 = _mm_add_epi32(dp3_32x4, DotProdInt8x4x4(row_8x16, vec3_8x16));
      matrix += kBlockSize;
    }  // for col
    // Horizontally add the 4 intermediate values.
    const __m128i dp_32x4 =
        ReduceInt32x4x4(dp0_32x4, dp1_32x4, dp2_32x4, dp3_32x4);
    // Convert to float
    const __m128 dp_fx4 = _mm_cvtepi32_ps(dp_32x4);
    // Load the results (This is an Accumulate function..)
    __m128 result_fx4 =
        _mm_set_ps(result3[row], result2[row], result1[row], result0[row]);
    // result += dp .* scaling
    result_fx4 =
        _mm_add_ps(result_fx4, _mm_mul_ps(dp_fx4, scaling_factors_fx4));
    // Save the results
    result0[row] = GetFloatVectorElement<0>(result_fx4);
    result1[row] = GetFloatVectorElement<1>(result_fx4);
    result2[row] = GetFloatVectorElement<2>(result_fx4);
    result3[row] = GetFloatVectorElement<3>(result_fx4);
  }  // for row
}
} // namespace
// Sparse matrix * batch-of-vectors multiply-accumulate. Batches are handled
// in groups of four so the 4-vector kernel can amortize the single pass over
// the sparse matrix; any leftover batches go through the 1-vector kernel.
void SseSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ results) {
  constexpr int kBatchGroup = 4;
  const int grouped_batches = n_batch & ~(kBatchGroup - 1);
  int batch = 0;
  for (; batch < grouped_batches; batch += kBatchGroup) {
    // One scale per batch, packed into the four XMM lanes.
    const __m128 group_scales = _mm_loadu_ps(scaling_factors + batch);
    SseSparseMatrix4VectorsMultiplyAccumulate(matrix, ledger, m_rows, m_cols,
                                              vectors, group_scales, results);
    vectors += kBatchGroup * m_cols;
    results += kBatchGroup * m_rows;
  }
  // Remaining n_batch % 4 batches, one at a time.
  for (; batch < n_batch; ++batch) {
    SseSparseMatrixVectorMultiplyAccumulate(matrix, ledger, m_rows, m_cols,
                                            vectors, scaling_factors[batch],
                                            results);
    vectors += m_cols;
    results += m_rows;
  }
}
// Computes output_vector[row] = sum of input_vector[row * reduction_size + i]
// over i in [0, reduction_size), for `output_size` rows of int8 data. Used
// above to precompute matrix row sums for zero-point corrections.
void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                           const int output_size, const int reduction_size) {
  static constexpr std::intptr_t kBlockSize = 16;
  for (std::intptr_t row = 0; row < output_size; ++row) {
    const int8_t* __restrict__ row_ptr = input_vector + row * reduction_size;
    __m128i row_sum_16x8 = _mm_setzero_si128();
    std::intptr_t col = 0;
    for (; col < (reduction_size & ~(kBlockSize - 1)); col += kBlockSize) {
      const __m128i row_8x16 =
          _mm_loadu_si128(reinterpret_cast<const __m128i*>(row_ptr + col));
      // maddubs(1, x) adds adjacent signed bytes into eight int16 lanes.
      const __m128i row_16x8 = _mm_maddubs_epi16(_mm_set1_epi8(1), row_8x16);
      // NOTE(review): lanes accumulate in int16; for very long rows
      // (reduction_size in the thousands) a lane could wrap. Presumably row
      // lengths are bounded by the callers — confirm.
      row_sum_16x8 = _mm_add_epi16(row_sum_16x8, row_16x8);
    }  // for col
#ifdef __SSE4_1__
    // Postamble for 8x 8-bit inputs.
    if (col < (reduction_size & ~7)) {
      // _mm_loadu_si64 not supported in gcc versions < 9, breaks kokoro build.
      const __m128i row_16x8 = _mm_cvtepi8_epi16(
          _mm_loadl_epi64(reinterpret_cast<const __m128i*>(row_ptr + col)));
      // dotprod += vec · row
      row_sum_16x8 = _mm_add_epi16(row_sum_16x8, row_16x8);
      col += 8;
    }
#endif
    // Widen the int16 partial sums to int32 and reduce to a scalar.
    const __m128i row_sum_32x4 =
        _mm_madd_epi16(row_sum_16x8, _mm_set1_epi16(1));
    int32_t row_sum = ReduceInt32x4(row_sum_32x4);
#if defined(__SSE4_1__) && defined(__clang__)
    // SSE 4.1: Don't try to unroll and vectorize this, already done above.
#pragma clang loop unroll(disable) vectorize(disable)
#endif
    // Scalar postamble for the remaining (<8, or <16 without SSE4.1) columns.
    for (; col < reduction_size; col++) {
      row_sum += row_ptr[col];
    }
    output_vector[row] = row_sum;
  }
}
} // namespace tensor_utils
} // namespace tflite
#endif // __SSSE3__
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/sse_tensor_utils.cc | C++ | apache-2.0 | 19,868 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_H_
// Note: This file is a copy-paste version of neon_tensor_utils.h, only
// difference is in MatrixBatchVectorMultiplyAccumulate and
// SparseMatrixBatchVectorMultiplyAccumulate (other functions do not have SSE
// implementation yet).
// Note: Most of the functions below use NEON_OR_PORTABLE, through the Intel
// NEON_2_SSE translator library. If a native SSE version of a function is
// implemented, replace the appropriate one to SSE_OR_PORTABLE.
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_check.h"
#include "tensorflow/lite/kernels/internal/optimized/neon_tensor_utils_impl.h"
#include "tensorflow/lite/kernels/internal/optimized/sse_check.h"
#include "tensorflow/lite/kernels/internal/optimized/sse_tensor_utils_impl.h"
#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
namespace tflite {
namespace tensor_utils {
// Float path: no native SSE version; dispatches to NEON (through the
// NEON_2_SSE translator) or the portable fallback.
void MatrixBatchVectorMultiplyAccumulate(const float* matrix, int m_rows,
                                         int m_cols, const float* vector,
                                         int n_batch, float* result) {
  NEON_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                   vector, n_batch, result);
}
// Hybrid path (int8 weights, float per-batch scales): native SSE available.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result) {
  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                  vectors, scaling_factors, n_batch, result);
}
// Asymmetric-quantization variant: adds per-channel scales, per-batch input
// offsets and cached row sums. Native SSE available.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                  vectors, scaling_factors, n_batch, result, per_channel_scale,
                  input_offset, scratch, row_sums, compute_row_sums, context);
}
// Variant with int32 scratch enabling a GEMM-based implementation.
// Native SSE available.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    int32_t* __restrict__ scratch, float* __restrict__ result,
    CpuBackendContext* __restrict__ context) {
  SSE_OR_PORTABLE(MatrixBatchVectorMultiplyAccumulate, matrix, m_rows, m_cols,
                  vectors, scaling_factors, n_batch, scratch, result, context);
}
// Sparse float kernel, 1x4-block format: NEON (translated) or portable.
void SparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
  NEON_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate1x4, matrix,
                   segments, indices, m_rows, m_cols, vector, n_batch, result);
}
// Sparse float kernel, ledger format: NEON (translated) or portable.
void SparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result) {
  NEON_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate, matrix, ledger,
                   m_rows, m_cols, vector, n_batch, result);
}
// Sparse hybrid int8 kernel, ledger format: native SSE available.
void SparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result) {
  SSE_OR_PORTABLE(SparseMatrixBatchVectorMultiplyAccumulate, matrix, ledger,
                  m_rows, m_cols, vectors, scaling_factors, n_batch, result);
}
// The fully-integer kernels below have no SIMD implementation yet; they all
// dispatch directly to the portable reference code.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* input_zeropoint_times_weights,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulate(
      input, input_zeropoint_times_weights, input_to_gate_weights, multiplier,
      shift, n_batch, n_input, n_output, output_zp, scratch, output, context);
}
// Same as above with int8 output.
void MatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* input_zeropoint_times_weights,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulate(
      input, input_zeropoint_times_weights, input_to_gate_weights, multiplier,
      shift, n_batch, n_input, n_output, output_zp, scratch, output, context);
}
void MatrixBatchVectorMultiply(const int8_t* input, int32_t input_zeropoint,
                               const int8_t* input_to_gate_weights,
                               int32_t input_to_gate_effective_scale_a,
                               int32_t input_to_gate_effective_scale_b,
                               int32_t n_batch, int32_t n_input, int32_t n_cell,
                               int8_t* gate_output, int8_t gate_output_zp) {
  PortableMatrixBatchVectorMultiply(
      input, input_zeropoint, input_to_gate_weights,
      input_to_gate_effective_scale_a, input_to_gate_effective_scale_b, n_batch,
      n_input, n_cell, gate_output, gate_output_zp);
}
void MatrixBatchVectorMultiply(const int16_t* hidden,
                               const int8_t* hidden_to_output_weights,
                               int32_t proj_effective_scale_a,
                               int32_t proj_effective_scale_b,
                               const int32_t* gate_bias, int32_t n_batch,
                               int32_t n_hidden, int32_t n_output,
                               int32_t output_zp, int8_t* proj_output) {
  PortableMatrixBatchVectorMultiply(hidden, hidden_to_output_weights,
                                    proj_effective_scale_a,
                                    proj_effective_scale_b, gate_bias, n_batch,
                                    n_hidden, n_output, output_zp, proj_output);
}
void MatrixScalarMultiplyAccumulate(const int8_t* matrix, int32_t scalar,
                                    int32_t n_row, int32_t n_col,
                                    int32_t* output) {
  PortableMatrixScalarMultiplyAccumulate(matrix, scalar, n_row, n_col, output);
}
// The element-wise / LSTM-gate kernels below have no SSE implementation yet;
// they all dispatch directly to the portable reference code.
void ApplyLayerNorm(const int16_t* input, const int16_t* layer_norm_weights,
                    const int32_t* bias, int32_t layer_norm_scale_a,
                    int32_t layer_norm_scale_b, int32_t variance_limit,
                    int n_batch, int n_input, int16_t* output) {
  PortableApplyLayerNorm(input, layer_norm_weights, bias, layer_norm_scale_a,
                         layer_norm_scale_b, variance_limit, n_batch, n_input,
                         output);
}
void ApplyLayerNormFloat(const int16_t* input,
                         const int16_t* layer_norm_weights,
                         int32_t layer_norm_scale_a, int32_t layer_norm_scale_b,
                         const int32_t* bias, int n_batch, int n_input,
                         int16_t* output) {
  PortableApplyLayerNormFloat(input, layer_norm_weights, layer_norm_scale_a,
                              layer_norm_scale_b, bias, n_batch, n_input,
                              output);
}
void ApplySigmoid(const int16_t* input, int32_t n_batch, int32_t n_input,
                  int16_t* output) {
  PortableApplySigmoid(input, n_batch, n_input, output);
}
void ApplySigmoidFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
                       int16_t* output) {
  PortableApplySigmoidFloat(input, n_batch, n_input, output);
}
void ApplyTanh(int32_t intger_bits, const int16_t* input, int32_t n_batch,
               int32_t n_input, int16_t* output) {
  PortableApplyTanh(intger_bits, input, n_batch, n_input, output);
}
void ApplyTanhFloat(const int16_t* input, int32_t n_batch, int32_t n_input,
                    int32_t integer_bits, int16_t* output) {
  PortableApplyTanhFloat(input, n_batch, n_input, integer_bits, output);
}
void CwiseMul(const int16_t* input_1, const int16_t* input_2, int n_batch,
              int n_input, int shift, int16_t* output) {
  PortableCwiseMul(input_1, input_2, n_batch, n_input, shift, output);
}
void CwiseMul(const int16_t* input_1, const int16_t* input_2,
              int32_t multiplier, int32_t shift, int32_t n_batch,
              int32_t n_input, int32_t output_zp, int8_t* output) {
  PortableCwiseMul(input_1, input_2, multiplier, shift, n_batch, n_input,
                   output_zp, output);
}
void CwiseAdd(const int16_t* input_1, const int16_t* input_2, int n_batch,
              int n_input, int16_t* output) {
  PortableCwiseAdd(input_1, input_2, n_batch, n_input, output);
}
void CwiseClipping(float* vector, const int v_size,
                   const float clipping_value) {
  PortableCwiseClipping(vector, v_size, clipping_value);
}
void CwiseClipping(int16_t* vector, const int v_size,
                   const int16_t clipping_value) {
  PortableCwiseClipping(vector, v_size, clipping_value);
}
void CwiseClipping(int8_t* vector, const int v_size,
                   const int8_t clipping_value) {
  PortableCwiseClipping(vector, v_size, clipping_value);
}
// Vector utilities: each wrapper's dispatch target (portable vs. NEON via the
// NEON_2_SSE translator) is visible in the macro used; none of these have a
// native SSE implementation.
void BatchVectorBatchVectorDotProduct(const int16_t* vector1,
                                      const int16_t* vector2, int v_size,
                                      int n_batch, int32_t* result) {
  PortableBatchVectorBatchVectorDotProduct(vector1, vector2, v_size, n_batch,
                                           result);
}
void VectorBatchVectorCwiseProductAccumulate(const int16_t* vector, int v_size,
                                             const int16_t* batch_vector,
                                             int n_batch, int32_t multiplier,
                                             int shift, int16_t* result) {
  NEON_OR_PORTABLE(VectorBatchVectorCwiseProductAccumulate, vector, v_size,
                   batch_vector, n_batch, multiplier, shift, result);
}
float VectorVectorDotProduct(const float* vector1, const float* vector2,
                             int v_size) {
  return NEON_OR_PORTABLE(VectorVectorDotProduct, vector1, vector2, v_size);
}
void Sub1Vector(const float* vector, int v_size, float* result) {
  NEON_OR_PORTABLE(Sub1Vector, vector, v_size, result);
}
void Sub1Vector(const int16_t* vector, int v_size, int16_t* result) {
  PortableSub1Vector(vector, v_size, result);
}
// Check if all entries of a vector are zero for float.
bool IsZeroVector(const float* vector, int v_size) {
  return NEON_OR_PORTABLE(IsZeroVector, vector, v_size);
}
// Check if all entries of a vector are zero for int8.
bool IsZeroVector(const int8_t* vector, int v_size) {
  return PortableIsZeroVector(vector, v_size);
}
void VectorScalarMultiply(const int8_t* vector, int v_size, float scale,
                          float* result) {
  NEON_OR_PORTABLE(VectorScalarMultiply, vector, v_size, scale, result);
}
// Quantization helpers: min/max either returned (pointer overload) or
// supplied by the caller (value overload).
void SymmetricQuantizeFloats(const float* values, const int size,
                             int8_t* quantized_values, float* min_value,
                             float* max_value, float* scaling_factor) {
  NEON_OR_PORTABLE(SymmetricQuantizeFloats, values, size, quantized_values,
                   min_value, max_value, scaling_factor);
}
void SymmetricQuantizeFloats(const float* values, const int size,
                             int8_t* quantized_values, float min_value,
                             float max_value, float* scaling_factor) {
  NEON_OR_PORTABLE(SymmetricQuantizeFloats, values, size, quantized_values,
                   min_value, max_value, scaling_factor);
}
void AsymmetricQuantizeFloats(const float* values, const int size,
                              int8_t* quantized_values, float* scaling_factor,
                              int32_t* offset) {
  NEON_OR_PORTABLE(AsymmetricQuantizeFloats, values, size, quantized_values,
                   scaling_factor, offset);
}
// Float reduction: NEON (translated) or portable.
void ReductionSumVector(const float* input_vector, float* output_vector,
                        int output_size, int reduction_size) {
  NEON_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,
                   reduction_size);
}
// int32 reduction: portable only.
void ReductionSumVector(const int32_t* input_vector, int32_t* output_vector,
                        int output_size, int reduction_size) {
  PortableReductionSumVector(input_vector, output_vector, output_size,
                             reduction_size);
}
// int8 reduction: native SSE available (used e.g. for matrix row sums).
void ReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                        int output_size, int reduction_size) {
  SSE_OR_PORTABLE(ReductionSumVector, input_vector, output_vector, output_size,
                  reduction_size);
}
// Per-batch mean/stddev normalization: portable only.
void MeanStddevNormalization(const float* __restrict__ input_vector,
                             float* __restrict__ output_vector, int v_size,
                             int n_batch) {
  PortableMeanStddevNormalization(input_vector, output_vector, v_size, n_batch);
}
// Two-gate rescale-and-add used by integer LSTM: portable only.
void TwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
                          const int8_t* recurrent, int8_t recurrent_zp,
                          int32_t input_effective_scale_a,
                          int32_t input_effective_scale_b,
                          int32_t recurrent_effective_scale_a,
                          int32_t recurrent_effective_scale_b, int32_t n_batch,
                          int32_t n_cell, int16_t* output) {
  PortableTwoGateSaturatingAdd(
      input, input_zp, recurrent, recurrent_zp, input_effective_scale_a,
      input_effective_scale_b, recurrent_effective_scale_a,
      recurrent_effective_scale_b, n_batch, n_cell, output);
}
} // namespace tensor_utils
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/sse_tensor_utils.h | C++ | apache-2.0 | 15,212 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_IMPL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_IMPL_H_
#include <cstdint>
#include "tensorflow/lite/kernels/cpu_backend_context.h"
#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif
namespace tflite {
namespace tensor_utils {
#ifdef __SSSE3__
// Matrix multiplication for quantized values using symmetric quantization:
// result[batch][row] += scaling_factors[batch] * (matrix[row] . vectors[batch])
// with int8 row-major `matrix` (m_rows x m_cols) and n_batch contiguous int8
// input vectors of m_cols each.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result);
// Matrix multiplication for quantized values using symmetric quantization
// with additional scratch memory for GEMM operation prior to scaling.
// `scratch` must hold at least n_batch * m_rows int32 values.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch, int32_t* scratch,
    float* __restrict__ result, CpuBackendContext* context);
// Matrix multiplication for quantized values using asymmetric quantization.
// per_channel_scale / input_offset may be null; row_sums caches the matrix
// row sums and *compute_row_sums (if non-null) is cleared once they are
// computed.
void SseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context);
// Matrix multiplication for quantized values using symmetric quantization.
// Sparse version; `ledger` lists, per row, the number of non-zero 16-byte
// blocks followed by their block indices.
void SseSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    const int m_rows, const int m_cols, const int8_t* __restrict__ vectors,
    const float* __restrict__ scaling_factors, int n_batch,
    float* __restrict__ result);
// Sums each length-`reduction_size` row of int8 `input_vector` into the
// corresponding int32 entry of `output_vector` (`output_size` rows).
void SseReductionSumVector(const int8_t* input_vector, int32_t* output_vector,
                           const int output_size, const int reduction_size);
#endif // __SSSE3__
} // namespace tensor_utils
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_SSE_TENSOR_UTILS_IMPL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/optimized/sse_tensor_utils_impl.h | C++ | apache-2.0 | 2,992 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
#include <cstring>
#include <vector>

#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/tensor_ctypes.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
// Builds a RuntimeShape from a vector of dimension sizes.
// Takes the vector by const reference — the previous by-value signature
// copied the whole vector on every call for no benefit — and makes the
// size_t -> int narrowing explicit. Backward-compatible for all callers.
inline RuntimeShape GetTensorShape(const std::vector<int32_t>& data) {
  return RuntimeShape(static_cast<int>(data.size()), data.data());
}
// A list of tensors in a format that can be used by kernels like split and
// concatenation: caches each tensor's data pointer and shape so kernels can
// address them as plain parallel arrays.
template <typename T>
class VectorOfTensors {
 public:
  // Build with the tensors in 'tensor_list'. Pointers are borrowed from
  // `context`, which must outlive this object.
  VectorOfTensors(const TfLiteContext& context,
                  const TfLiteIntArray& tensor_list) {
    int num_tensors = tensor_list.size;
    all_data_.reserve(num_tensors);
    all_shape_.reserve(num_tensors);
    all_shape_ptr_.reserve(num_tensors);
    for (int i = 0; i < num_tensors; ++i) {
      TfLiteTensor* t = &context.tensors[tensor_list.data[i]];
      all_data_.push_back(GetTensorData<T>(t));
      all_shape_.push_back(GetTensorShape(t));
    }
    // Taking the pointer from inside a std::vector is only OK if the vector is
    // never modified, so we populate all_shape in the previous loop and then we
    // are free to grab iterators here.
    for (int i = 0; i < num_tensors; ++i) {
      all_shape_ptr_.push_back(&all_shape_[i]);
    }
  }
  // Return a pointer to the data pointers of all tensors in the list. For
  // example:
  //   float* const* f = v.data();
  //   f[0][1] is the second element of the first tensor.
  T* const* data() const { return all_data_.data(); }
  // Return a pointer the shape pointers of all tensors in the list. For
  // example:
  //   const RuntimeShape* const* d = v.dims();
  //   dims[1] are the dimensions of the second tensor in the list.
  const RuntimeShape* const* shapes() const { return all_shape_ptr_.data(); }
 private:
  std::vector<T*> all_data_;                  // Borrowed data pointers.
  std::vector<RuntimeShape> all_shape_;       // Owned shape copies.
  std::vector<RuntimeShape*> all_shape_ptr_;  // Stable pointers into all_shape_.
};
// A list of quantized tensors in a format that can be used by kernels like
// split and concatenation.
//
// Extends VectorOfTensors<uint8_t> with the per-tensor quantization
// parameters (zero point and scale) read from each tensor's `params`,
// exposed as parallel arrays indexed like the tensor list.
class VectorOfQuantizedTensors : public VectorOfTensors<uint8_t> {
 public:
  // Build with the tensors in 'tensor_list'.
  VectorOfQuantizedTensors(const TfLiteContext& context,
                           const TfLiteIntArray& tensor_list)
      : VectorOfTensors<uint8_t>(context, tensor_list) {
    for (int i = 0; i < tensor_list.size; ++i) {
      TfLiteTensor* t = &context.tensors[tensor_list.data[i]];
      zero_point_.push_back(t->params.zero_point);
      scale_.push_back(t->params.scale);
    }
  }
  // Per-tensor scale factors, one per tensor in the list.
  const float* scale() const { return scale_.data(); }
  // Per-tensor zero points, one per tensor in the list.
  const int32_t* zero_point() const { return zero_point_.data(); }

 private:
  std::vector<int32_t> zero_point_;
  std::vector<float> scale_;
};
// Writes randomly accessed values from `input` sequentially into `output`.
//
// Useful for gather-style kernels: the caller picks arbitrary source
// positions, and this class appends each picked value at the next output
// slot, maintaining the write cursor internally. Does not own either buffer.
template <typename T>
class SequentialTensorWriter {
 public:
  // Tensor-based constructor: resolves both buffers via GetTensorData<T>.
  SequentialTensorWriter(const TfLiteTensor* input, TfLiteTensor* output) {
    input_data_ = GetTensorData<T>(input);
    output_ptr_ = GetTensorData<T>(output);
  }
  // Raw-pointer variant for callers that already hold typed buffers.
  SequentialTensorWriter(const T* input_data, T* output_data)
      : input_data_(input_data), output_ptr_(output_data) {}
  // Copies the element at `position` and advances the write cursor by one.
  void Write(int position) { *output_ptr_++ = input_data_[position]; }
  // Copies `len` contiguous elements starting at `position` and advances the
  // write cursor by `len`.
  // NOTE(review): memcpy is used unqualified; this header does not include
  // <cstring> itself, so it relies on an indirect include — confirm.
  void WriteN(int position, int len) {
    memcpy(output_ptr_, &input_data_[position], sizeof(T) * len);
    output_ptr_ += len;
  }

 private:
  const T* input_data_;  // Source buffer, indexed randomly by callers.
  T* output_ptr_;        // Next write position in the destination buffer.
};
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_PORTABLE_TENSOR_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/portable_tensor.h | C++ | apache-2.0 | 4,305 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include <algorithm>
#include <cmath>
#include <limits>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
namespace tflite {
namespace {
// These constants are used to manipulate the binary representation of doubles.
// Double-precision binary64 floating point format is:
// Bit | 63 | 62-52 | 51-0 |
// | Sign | Exponent | Fraction |
// To avoid 64-bit integers as much as possible, I break this into high and
// low 32-bit chunks. High is:
// Bit | 31 | 30-20 | 19-0 |
// | Sign | Exponent | High Fraction |
// Low is:
// Bit | 31-0 |
// | Low Fraction |
// We then access the components through logical bit-wise operations to
// extract the parts needed, with the positions and masks derived from the
// layout shown above.
constexpr uint64_t kSignMask = 0x8000000000000000LL;
constexpr uint64_t kExponentMask = 0x7ff0000000000000LL;
constexpr int32_t kExponentShift = 52;
constexpr int32_t kExponentBias = 1023;
constexpr uint32_t kExponentIsBadNum = 0x7ff;
constexpr uint64_t kFractionMask = 0x000fffffffc00000LL;
constexpr uint32_t kFractionShift = 22;
constexpr uint32_t kFractionRoundingMask = 0x003fffff;
constexpr uint32_t kFractionRoundingThreshold = 0x00200000;
} // namespace
// Decomposes `double_multiplier` into a Q0.31 fixed-point significand
// (*quantized_multiplier) and a power-of-two exponent (*shift), so that
// double_multiplier ~= quantized_multiplier * 2^(shift - 31).
// `shift` is negative for multipliers below 0.5 (a right shift when applied)
// and positive for multipliers of 1.0 or more (a left shift).
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
                        int* shift) {
  // Zero has no normalized significand; represent it as (0, 0).
  if (double_multiplier == 0.) {
    *quantized_multiplier = 0;
    *shift = 0;
    return;
  }
#ifdef TFLITE_EMULATE_FLOAT
  // If we're trying to avoid the use of floating-point instructions (for
  // example on microcontrollers) then use an alternative implementation
  // that only requires integer and bitwise operations. To enable this, you
  // need to set the define during the build process for your platform.
  int64_t q_fixed = IntegerFrExp(double_multiplier, shift);
#else   // TFLITE_EMULATE_FLOAT
  // std::frexp yields q in [0.5, 1); scale it into Q0.31 and round.
  const double q = std::frexp(double_multiplier, shift);
  auto q_fixed = static_cast<int64_t>(TfLiteRound(q * (1ll << 31)));
#endif  // TFLITE_EMULATE_FLOAT
  TFLITE_CHECK(q_fixed <= (1ll << 31));
  // Rounding can push q_fixed to exactly 2^31, which does not fit in int32;
  // halve the significand and compensate in the exponent.
  if (q_fixed == (1ll << 31)) {
    q_fixed /= 2;
    ++*shift;
  }
  TFLITE_CHECK_LE(q_fixed, std::numeric_limits<int32_t>::max());
  // A shift amount smaller than -31 would cause all bits to be shifted out
  // and thus all results would be zero. We implement that instead with
  // q_fixed==0, so as to avoid hitting issues with right-shift
  // operations with shift amounts greater than 31. Note that this happens
  // roughly when abs(double_multiplier) < 2^-31 and the present handling means
  // that we're effectively flushing tiny double_multiplier's to zero.
  // We could conceivably handle values in the range (roughly) [32, 63]
  // as 'denormals' i.e. (shift==0, q_fixed < 2^30). In that point of view
  // the present handling is just doing 'flush denormals to zero'. We could
  // reconsider and actually generate nonzero denormals if a need arises.
  if (*shift < -31) {
    *shift = 0;
    q_fixed = 0;
  }
  *quantized_multiplier = static_cast<int32_t>(q_fixed);
}
// Decomposes a multiplier strictly greater than 1 into a Q0.31 significand
// and a non-negative left shift. Thin checked wrapper over
// QuantizeMultiplier.
void QuantizeMultiplierGreaterThanOne(double double_multiplier,
                                      int32_t* quantized_multiplier,
                                      int* left_shift) {
  TFLITE_CHECK_GT(double_multiplier, 1.);
  QuantizeMultiplier(double_multiplier, quantized_multiplier, left_shift);
  // A multiplier > 1 must decompose with a non-negative exponent.
  TFLITE_CHECK_GE(*left_shift, 0);
}
// Decomposes a multiplier in the open interval (0, 1) into a Q0.31
// significand and a non-positive exponent (*left_shift <= 0, i.e. a right
// shift when applied). Thin checked wrapper over QuantizeMultiplier.
void QuantizeMultiplierSmallerThanOneExp(double double_multiplier,
                                         int32_t* quantized_multiplier,
                                         int* left_shift) {
  TFLITE_CHECK_LT(double_multiplier, 1.);
  TFLITE_CHECK_GT(double_multiplier, 0.);
  int shift;
  QuantizeMultiplier(double_multiplier, quantized_multiplier, &shift);
  // A multiplier < 1 must decompose with a non-positive exponent.
  TFLITE_CHECK_LE(shift, 0);
  *left_shift = shift;
}
// Integer-only replacement for std::frexp: splits `input` into a fraction
// (returned, scaled so that 1.0 == 1 << 30) and a power-of-two exponent
// (*shift), by decoding the IEEE binary64 bit layout directly. NaN returns 0
// and infinities return +/- INT64 max/min, both with *shift == INT_MAX.
int64_t IntegerFrExp(double input, int* shift) {
  // Make sure our assumptions about the double layout hold.
  TFLITE_CHECK_EQ(8, sizeof(double));
  // We want to access the bits of the input double value directly, which is
  // tricky to do safely, so use a union to handle the casting.
  // NOTE(review): reading the inactive union member is type punning;
  // presumably the targeted compilers support it — confirm if porting.
  union {
    double double_value;
    uint64_t double_as_uint;
  } cast_union;
  cast_union.double_value = input;
  const uint64_t u = cast_union.double_as_uint;
  // If the bitfield is all zeros apart from the sign bit, this is a normalized
  // zero value, so return standard values for this special case.
  if ((u & ~kSignMask) == 0) {
    *shift = 0;
    return 0;
  }
  // Deal with NaNs and Infs, which are always indicated with a fixed pattern in
  // the exponent, and distinguished by whether the fractions are zero or
  // non-zero.
  const uint32_t exponent_part = ((u & kExponentMask) >> kExponentShift);
  if (exponent_part == kExponentIsBadNum) {
    *shift = std::numeric_limits<int>::max();
    if (u & kFractionMask) {
      // NaN, so just return zero (with the exponent set to INT_MAX).
      return 0;
    } else {
      // Infinity, so return +/- INT_MAX.
      if (u & kSignMask) {
        return std::numeric_limits<int64_t>::min();
      } else {
        return std::numeric_limits<int64_t>::max();
      }
    }
  }
  // The shift is fairly easy to extract from the high bits of the double value,
  // just by masking it out and applying a bias. The std::frexp() implementation
  // always returns values between 0.5 and 1.0 though, whereas the exponent
  // assumes 1.0 to 2.0 is the standard range, so I add on one to match that
  // interface.
  *shift = (exponent_part - kExponentBias) + 1;
  // There's an implicit high bit in the double format definition, so make sure
  // we include that at the top, and then reconstruct the rest of the fractional
  // value from the remaining fragments.
  int64_t fraction = 0x40000000 + ((u & kFractionMask) >> kFractionShift);
  // We're cutting off some bits at the bottom, so to exactly match the standard
  // frexp implementation here we'll apply rounding by adding one to the least
  // significant bit of the result if the discarded portion is over half of the
  // maximum.
  if ((u & kFractionRoundingMask) > kFractionRoundingThreshold) {
    fraction += 1;
  }
  // Negate the fraction if the sign bit was set.
  if (u & kSignMask) {
    fraction *= -1;
  }
  return fraction;
}
// Inverse of IntegerFrExp: reassembles an IEEE binary64 double from an
// integer fraction (1.0 == 0x40000000) and a power-of-two exponent, using
// only integer/bitwise operations. shift == INT_MAX encodes NaN (fraction 0)
// or +/- infinity (fraction nonzero, by sign).
double DoubleFromFractionAndShift(int64_t fraction, int shift) {
  union {
    double double_value;
    uint64_t double_as_uint;
  } result;
  // Detect NaNs and infinities.
  if (shift == std::numeric_limits<int>::max()) {
    if (fraction == 0) {
      return std::numeric_limits<double>::quiet_NaN();
    } else if (fraction > 0) {
      return std::numeric_limits<double>::infinity();
    } else {
      return -std::numeric_limits<double>::infinity();
    }
  }
  // Return a normalized zero for a zero fraction.
  if (fraction == 0) {
    result.double_as_uint = 0;
    return result.double_value;
  }
  bool is_negative = (fraction < 0);
  int64_t encoded_fraction = is_negative ? -fraction : fraction;
  // The encoded exponent assumes the [1.0, 2.0) convention, whereas the
  // fraction uses [0.5, 1.0); hence the -1.
  int64_t encoded_shift = (shift - 1);
  // Normalize the fraction into [0x40000000, 0x80000000], adjusting the
  // exponent to compensate for each doubling/halving.
  while (encoded_fraction < 0x40000000) {
    encoded_fraction *= 2;
    encoded_shift -= 1;
  }
  while (encoded_fraction > 0x80000000) {
    encoded_fraction /= 2;
    encoded_shift += 1;
  }
  // Drop the implicit leading bit; the remainder becomes the stored mantissa.
  encoded_fraction -= 0x40000000;
  // Clamp the exponent to the representable binary64 range.
  if (encoded_shift < -1022) {
    encoded_shift = -1023;
  } else if (encoded_shift > 1022) {
    encoded_shift = 1023;
  }
  // Bias the exponent as required by the binary64 encoding.
  encoded_shift += kExponentBias;
  uint64_t encoded_sign = is_negative ? kSignMask : 0;
  result.double_as_uint = encoded_sign | (encoded_shift << kExponentShift) |
                          (encoded_fraction << kFractionShift);
  return result.double_value;
}
// Multiplies two doubles using only integer and bitwise instructions, for
// housekeeping calculations on targets without floating-point hardware.
// If either operand is NaN or infinite, the result is NaN.
double IntegerDoubleMultiply(double a, double b) {
  int exp_a = 0;
  int exp_b = 0;
  const int64_t frac_a = IntegerFrExp(a, &exp_a);
  const int64_t frac_b = IntegerFrExp(b, &exp_b);
  // IntegerFrExp signals NaN/Inf with an INT_MAX shift; propagate as NaN.
  const int bad_shift = std::numeric_limits<int>::max();
  if (exp_a == bad_shift || exp_b == bad_shift) {
    return std::numeric_limits<double>::quiet_NaN();
  }
  // Each fraction is scaled so 1.0 == 1 << 30; the 60-ish-bit product is
  // renormalized by discarding 32 low bits and bumping the exponent by one.
  const int64_t product_fraction = (frac_a * frac_b) >> 32;
  const int product_shift = exp_a + exp_b + 1;
  return DoubleFromFractionAndShift(product_fraction, product_shift);
}
// Three-way comparison of two doubles using only integer and logical
// instructions: returns -1 if a < b, 0 if equal, +1 if a > b.
// If either operand is NaN or infinite, returns 1.
// NOTE(review): after the special cases, exponents are compared before
// fractions; for mixed-sign finite operands (e.g. a = -2, b = 1) this
// ordering looks incorrect — confirm callers only pass positive values.
int IntegerDoubleCompare(double a, double b) {
  int a_shift;
  const int64_t a_fraction = IntegerFrExp(a, &a_shift);
  int b_shift;
  const int64_t b_fraction = IntegerFrExp(b, &b_shift);
  // Detect NaNs and infinities.
  if (a_shift == std::numeric_limits<int>::max() ||
      (b_shift == std::numeric_limits<int>::max())) {
    return 1;
  }
  // Zero-vs-signed special cases (a zero fraction carries no sign).
  if ((a_fraction == 0) && (b_fraction < 0)) {
    return 1;
  } else if ((a_fraction < 0) && (b_fraction == 0)) {
    return -1;
  } else if (a_shift < b_shift) {
    return -1;
  } else if (a_shift > b_shift) {
    return 1;
  } else if (a_fraction < b_fraction) {
    return -1;
  } else if (a_fraction > b_fraction) {
    return 1;
  } else {
    return 0;
  }
}
// Builds the fixed-point multiplier used by quantized Softmax: combines
// `beta` and the input scale into a
// Q(input_integer_bits).(31-input_integer_bits) multiplier, capped at the
// maximum Q0.31 value, then splits it into significand and exponent via
// QuantizeMultiplierGreaterThanOne.
void PreprocessSoftmaxScaling(double beta, double input_scale,
                              int input_integer_bits,
                              int32_t* quantized_multiplier, int* left_shift) {
  // If the overall multiplier (input and beta) is large, then exp() of an
  // input difference of 1 scaled by this will be large. In other words, we
  // can cap the multiplier and know that, when it is used, the output will be
  // (round to) zero wherever the input is not at the maximum value.
  // If the overall scale is less than one, and input_integer_bits=0, then the
  // result is double equivalent of Q0.31 (actually with more precision). Thus
  // this generates a Q(input_integer_bits).(31-input_integer_bits)
  // representation.
#ifdef TFLITE_EMULATE_FLOAT
  // Integer-only path: same computation as below, built from the
  // integer-emulated double helpers.
  const double input_beta = IntegerDoubleMultiply(beta, input_scale);
  int shift;
  int64_t fraction = IntegerFrExp(input_beta, &shift);
  shift += (31 - input_integer_bits);
  double input_beta_real_multiplier =
      DoubleFromFractionAndShift(fraction, shift);
  if (IntegerDoubleCompare(input_beta_real_multiplier, (1ll << 31) - 1.0) > 0) {
    input_beta_real_multiplier = (1ll << 31) - 1.0;
  }
#else   // TFLITE_EMULATE_FLOAT
  const double input_beta_real_multiplier = std::min<double>(
      beta * input_scale * (1 << (31 - input_integer_bits)), (1ll << 31) - 1.0);
#endif  // TFLITE_EMULATE_FLOAT
  QuantizeMultiplierGreaterThanOne(input_beta_real_multiplier,
                                   quantized_multiplier, left_shift);
}
// Like PreprocessSoftmaxScaling, but additionally derives the inverse
// ("reverse") scaling factor needed by LogSoftmax, decomposed with
// QuantizeMultiplierSmallerThanOneExp.
void PreprocessLogSoftmaxScalingExp(double beta, double input_scale,
                                    int input_integer_bits,
                                    int32_t* quantized_multiplier,
                                    int* left_shift,
                                    int32_t* reverse_scaling_divisor,
                                    int* reverse_scaling_left_shift) {
  PreprocessSoftmaxScaling(beta, input_scale, input_integer_bits,
                           quantized_multiplier, left_shift);
  // Also calculate what amounts to the inverse scaling factor for the input:
  // the reciprocal of the forward multiplier just computed above.
  const double real_reverse_scaling_divisor =
      (1 << (31 - *left_shift)) / static_cast<double>(*quantized_multiplier);
  tflite::QuantizeMultiplierSmallerThanOneExp(real_reverse_scaling_divisor,
                                              reverse_scaling_divisor,
                                              reverse_scaling_left_shift);
}
// Computes the largest input magnitude ("radius") whose scaled intermediate
// value stays within `total_signed_bits` bits after the left shift, so that
// MultiplyByQuantizedMultiplierGreaterThanOne cannot overflow. Used e.g. as
// the negative clamp in quantized Softmax.
int CalculateInputRadius(int input_integer_bits, int input_left_shift,
                         int total_signed_bits) {
#ifdef TFLITE_EMULATE_FLOAT
  // Integer-only equivalent of the floating-point expression below.
  int64_t result = (1 << input_integer_bits) - 1;
  result <<= (total_signed_bits - input_integer_bits);
  result >>= input_left_shift;
  return result;
#else   // TFLITE_EMULATE_FLOAT
  const double max_input_rescaled =
      1.0 * ((1 << input_integer_bits) - 1) *
      (1ll << (total_signed_bits - input_integer_bits)) /
      (1ll << input_left_shift);
  // Tighten bound using floor.  Suppose that we could use the exact value.
  // After scaling the difference, the result would be at the maximum.  Thus we
  // must ensure that our value has lower magnitude.
  return static_cast<int>(std::floor(max_input_rescaled));
#endif  // TFLITE_EMULATE_FLOAT
}
// Nudges the real-valued range [min, max] so that, with the resulting scale,
// real zero maps exactly to an integer in [quant_min, quant_max]. Outputs the
// nudged range endpoints and the shared scale.
void NudgeQuantizationRange(const float min, const float max,
                            const int quant_min, const int quant_max,
                            float* nudged_min, float* nudged_max,
                            float* nudged_scale) {
  // This code originates from tensorflow/core/kernels/fake_quant_ops_functor.h.
  const float quant_min_float = static_cast<float>(quant_min);
  const float quant_max_float = static_cast<float>(quant_max);
  *nudged_scale = (max - min) / (quant_max_float - quant_min_float);
  // Ideal (unrounded) zero point that would make `min` map exactly onto
  // quant_min.
  const float zero_point_from_min = quant_min_float - min / *nudged_scale;
  // NOTE(review): uint16_t restricts zero points to [0, 65535]; assumes
  // quant_min/quant_max are non-negative and fit in 16 bits — confirm.
  uint16_t nudged_zero_point;
  if (zero_point_from_min < quant_min_float) {
    nudged_zero_point = static_cast<uint16_t>(quant_min);
  } else if (zero_point_from_min > quant_max_float) {
    nudged_zero_point = static_cast<uint16_t>(quant_max);
  } else {
    nudged_zero_point = static_cast<uint16_t>(TfLiteRound(zero_point_from_min));
  }
  // Recompute the range endpoints from the integer zero point so that zero
  // is exactly representable.
  *nudged_min = (quant_min_float - nudged_zero_point) * (*nudged_scale);
  *nudged_max = (quant_max_float - nudged_zero_point) * (*nudged_scale);
}
// Fake-quantizes `input_data` (quantizes then dequantizes in one pass) into
// `output_data` using the scale and nudged range from NudgeQuantizationRange:
// each value is clamped to [nudged_min, nudged_max], snapped to the nearest
// quantization step, and written back in real-value units.
// NOTE(review): `size` is a float used as the element-count loop bound;
// presumably callers pass whole numbers — confirm, an int would be safer.
void FakeQuantizeArray(const float nudged_scale, const float nudged_min,
                       const float nudged_max, const float* input_data,
                       float* output_data, const float size) {
  // This code originates from tensorflow/core/kernels/fake_quant_ops_functor.h.
  // Multiplying by the reciprocal avoids a division per element.
  const float inv_nudged_scale = 1.0f / nudged_scale;
  for (int i = 0; i < size; i++) {
    const float src_val = input_data[i];
    const float clamped = std::min(nudged_max, std::max(nudged_min, src_val));
    const float clamped_shifted = clamped - nudged_min;
    // Round to the nearest step, then shift back into real-value units.
    const float dst_val =
        TfLiteRound(clamped_shifted * inv_nudged_scale) * nudged_scale +
        nudged_min;
    output_data[i] = dst_val;
  }
}
// If x is (approximately) a power of two, stores its exponent, i.e.
// log2(x), in *log2_result and returns true; otherwise returns false.
// *log2_result is written in either case.
bool CheckedLog2(const float x, int* log2_result) {
  // Using TfLiteRound instead of std::round and std::log instead of
  // std::log2 to work around these functions being missing in a toolchain
  // used in some TensorFlow tests as of May 2018.
  const float inv_ln2 = 1.0f / std::log(2.0f);
  const float exact_log2 = std::log(x) * inv_ln2;
  const float rounded_log2 = TfLiteRound(exact_log2);
  *log2_result = static_cast<int>(rounded_log2);
  // Accept only when x sits within a small tolerance of a true power of two.
  return std::abs(exact_log2 - rounded_log2) < 1e-3f;
}
// Decomposes each of `size` multipliers into its Q0.31 significand and
// power-of-two exponent via QuantizeMultiplier. The significand and shift
// outputs are parallel arrays indexed like `effective_scales`.
void QuantizeMultiplierArray(const double* effective_scales, size_t size,
                             int32_t* effective_scale_significand,
                             int* effective_shift) {
  size_t idx = 0;
  while (idx < size) {
    QuantizeMultiplier(effective_scales[idx],
                       effective_scale_significand + idx,
                       effective_shift + idx);
    ++idx;
  }
}
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/quantization_util.cc | C++ | apache-2.0 | 15,714 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_
#include <cmath>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
// Given the min and max values of a float array, return
// reasonable quantization parameters to use for this array.
//
// T is the quantized integer type (e.g. uint8_t/int8_t). `narrow_range`
// reserves the lowest quantized value, shrinking the range to
// [min+1, max]. Requires rmin <= 0 <= rmax; the zero point is nudged to an
// integer so that real zero is exactly representable.
template <typename T>
QuantizationParams ChooseQuantizationParams(double rmin, double rmax,
                                            bool narrow_range) {
  const T qmin = std::numeric_limits<T>::min() + (narrow_range ? 1 : 0);
  const T qmax = std::numeric_limits<T>::max();
  const double qmin_double = qmin;
  const double qmax_double = qmax;
  // 0 should always be a representable value. Let's assume that the initial
  // min,max range contains 0.
  TFLITE_CHECK_LE(rmin, 0.);
  TFLITE_CHECK_GE(rmax, 0.);
  if (rmin == rmax) {
    // Special case where the min,max range is a point. Should be {0}.
    TFLITE_CHECK_EQ(rmin, 0.);
    TFLITE_CHECK_EQ(rmax, 0.);
    QuantizationParams quantization_params;
    quantization_params.zero_point = 0;
    quantization_params.scale = 0.;
    return quantization_params;
  }
  // General case.
  //
  // First determine the scale.
  const double scale = (rmax - rmin) / (qmax_double - qmin_double);
  // Zero-point computation.
  // First the initial floating-point computation. The zero-point can be
  // determined from solving an affine equation for any known pair
  // (real value, corresponding quantized value).
  // We know two such pairs: (rmin, qmin) and (rmax, qmax).
  // The arithmetic error on the zero point computed from either pair
  // will be roughly machine_epsilon * (sum of absolute values of terms)
  // so we want to use the variant that adds the smaller terms.
  const double zero_point_from_min = qmin_double - rmin / scale;
  const double zero_point_from_max = qmax_double - rmax / scale;
  const double zero_point_from_min_error =
      std::abs(qmin_double) + std::abs(rmin / scale);
  const double zero_point_from_max_error =
      std::abs(qmax_double) + std::abs(rmax / scale);
  const double zero_point_double =
      zero_point_from_min_error < zero_point_from_max_error
          ? zero_point_from_min
          : zero_point_from_max;
  // Now we need to nudge the zero point to be an integer
  // (our zero points are integer, and this is motivated by the requirement
  // to be able to represent the real value "0" exactly as a quantized value,
  // which is required in multiple places, for example in Im2col with SAME
  // padding).
  T nudged_zero_point = 0;
  if (zero_point_double < qmin_double) {
    nudged_zero_point = qmin;
  } else if (zero_point_double > qmax_double) {
    nudged_zero_point = qmax;
  } else {
    nudged_zero_point = static_cast<T>(round(zero_point_double));
  }
  // The zero point should always be in the range of quantized value,
  // [qmin, qmax].
  TFLITE_CHECK_GE(nudged_zero_point, qmin);
  TFLITE_CHECK_LE(nudged_zero_point, qmax);
  // Finally, store the result nudged quantization params.
  QuantizationParams quantization_params;
  quantization_params.zero_point = nudged_zero_point;
  quantization_params.scale = scale;
  return quantization_params;
}
// Convenience overload: same as above with narrow_range defaulting to false
// (the full quantized range is used).
template <typename T>
QuantizationParams ChooseQuantizationParams(double rmin, double rmax) {
  return ChooseQuantizationParams<T>(rmin, rmax, false);
}
// Converts a floating-point value to an integer type with fully defined
// saturating semantics. For every x where static_cast<IntOut>(x) is legal,
// the result is identical to that cast (truncation toward zero). Where the
// cast would be undefined behavior, this function instead guarantees:
//
// 1. NaN maps to zero.
// 2. Values whose truncation lies above IntOut's range map to
//    std::numeric_limits<IntOut>::max().
// 3. Values whose truncation lies below IntOut's range map to
//    std::numeric_limits<IntOut>::min().
//
// Infinities (finite or not) are covered by rules #2 and #3. FloatIn's
// range must include IntOut's range, otherwise the results are undefined.
// TODO(sfeuz): Replace by absl::SafeCast once available.
template <class IntOut, class FloatIn>
IntOut SafeCast(FloatIn x) {
  static_assert(!std::numeric_limits<FloatIn>::is_integer,
                "FloatIn is integer");
  static_assert(std::numeric_limits<IntOut>::is_integer,
                "IntOut is not integer");
  static_assert(std::numeric_limits<IntOut>::radix == 2, "IntOut is base 2");
  using Limits = std::numeric_limits<IntOut>;
  // NaN has no meaningful ordering; by convention it maps to zero.
  if (std::isnan(x)) {
    return 0;
  }
  // Unsigned targets clip every negative input (including -inf) to zero.
  if (x < 0 && !Limits::is_signed) {
    return 0;
  }
  // Remaining infinities saturate to the corresponding bound.
  if (std::isinf(x)) {
    return x < 0 ? Limits::min() : Limits::max();
  }
  // frexp gives |x| = f * 2^bin_exp with |f| in [0.5, 1.0) (bin_exp == 0
  // when x == 0), so the magnitude of x is strictly below 2^bin_exp.
  int bin_exp = 0;
  std::frexp(x, &bin_exp);
  // With N = Limits::digits (non-sign bits), every |x| < 2^N truncates to a
  // representable integer. The single representable value this test misses
  // is signed min (-2^N), which the saturating fall-through returns anyway.
  if (bin_exp <= Limits::digits) {
    return x;
  }
  // Magnitude >= 2^N: saturate.
  return x < 0 ? Limits::min() : Limits::max();
}
// Decompose a double multiplier into a Q0.31 int32 representation of its
// significand, and shift representation of NEGATIVE its exponent ---
// this is intended as a RIGHT-shift.
//
// Restricted to the case where the multiplier < 1 (and non-negative).
void QuantizeMultiplierSmallerThanOneExp(double double_multiplier,
int32_t* quantized_multiplier,
int* left_shift);
// Decompose a double multiplier into a Q0.31 int32 representation of its
// significand, and shift representation of its exponent.
//
// Restricted to the case where the multiplier > 1.
void QuantizeMultiplierGreaterThanOne(double double_multiplier,
int32_t* quantized_multiplier,
int* left_shift);
// Decompose a double multiplier into a Q0.31 int32 representation of its
// significand, and shift representation of its exponent.
//
// Handles an arbitrary positive multiplier. The 'shift' output-value is
// basically the 'floating-point exponent' of the multiplier:
// Negative for a right-shift (when the multiplier is <1), positive for a
// left-shift (when the multiplier is >1)
void QuantizeMultiplier(double double_multiplier, int32_t* quantized_multiplier,
int* shift);
// Splits a double input value into a returned fraction, and a shift value from
// the exponent, using only bitwise and integer operations to support
// microcontrollers and other environments without floating-point support.
//
// This is designed to be a replacement for how std::frexp() is used within the
// QuantizeMultiplier() function, and so has a different signature than the
// standard version, returning a 64-bit integer rather than a double. This
// result has a maximum value of 1<<31, with the fraction expressed as a
// proportion of that maximum.
//
// std::frexp() returns NaNs and infinities unmodified, but since we're
// returning integers that can't represent those values, instead we return
// a shift of std::numeric_limits<int>::max() for all bad numbers, with an int64
// result of 0 for NaNs, std:numeric_limits<int64_t>::max() for +INFINITY, and
// std::numeric_limits<int64_t>::min() for -INFINITY. Denormalized inputs will
// result in return values that end up truncating some bits at the end,
// reflecting the loss of precision inherent in denormalization.
int64_t IntegerFrExp(double input, int* shift);
// Converts an integer fraction in the format produced by IntegerFrExp (where
// 0x40000000 is 1.0) and an exponent shift (between -1022 and +1022) into an
// IEEE binary64 double format result. The implementation uses only integer and
// bitwise operators, so no floating point hardware support or emulation is
// needed. This is here so quantized operations can run non-time-critical
// preparation calculations on microcontrollers and other platforms without
// float support.
double DoubleFromFractionAndShift(int64_t fraction, int shift);
// Performs a multiplication of two numbers in double format, using only integer
// and bitwise instructions. This is aimed at supporting housekeeping functions
// for quantized operations on microcontrollers without floating-point hardware.
double IntegerDoubleMultiply(double a, double b);
// Returns -1 if a is less than b, 0 if a and b are equal, and +1 if a is
// greater than b. It is implemented using only integer and logical instructions
// so that it can be easily run on microcontrollers for quantized operations.
int IntegerDoubleCompare(double a, double b);
// This first creates a multiplier in a double equivalent of
// Q(input_integer_bits).(31-input_integer_bits) representation, with extra
// precision in the double's fractional bits. It then splits the result into
// significand and exponent.
void PreprocessSoftmaxScaling(double beta, double input_scale,
int input_integer_bits,
int32_t* quantized_multiplier, int* left_shift);
// Like PreprocessSoftmaxScaling, but inverse scaling factors also calculated.
void PreprocessLogSoftmaxScalingExp(double beta, double input_scale,
int input_integer_bits,
int32_t* quantized_multiplier,
int* left_shift,
int32_t* reverse_scaling_divisor,
int* reverse_scaling_left_shift);
// Calculate the largest input that will result in a within-bounds intermediate
// result within MultiplyByQuantizedMultiplierGreaterThanOne. In other words,
// it must not overflow before we reduce the value by multiplication by the
// input multiplier. The negative radius is used as the minimum difference in
// Softmax.
int CalculateInputRadius(int input_integer_bits, int input_left_shift,
int total_signed_bits = 31);
// Nudges a min/max quantization range to ensure zero is zero.
// Gymnastics with nudged zero point is to ensure that real zero maps to
// an integer, which is required for e.g. zero-padding in convolutional layers.
// Outputs nudged_min, nudged_max, nudged_scale.
void NudgeQuantizationRange(const float min, const float max,
const int quant_min, const int quant_max,
float* nudged_min, float* nudged_max,
float* nudged_scale);
// Fake quantizes (quantizes and dequantizes) input_data using the scale,
// nudged_min, and nudged_max from NudgeQuantizationRange. This matches the code
// in TensorFlow's FakeQuantizeWithMinMaxVarsFunctor.
void FakeQuantizeArray(const float nudged_scale, const float nudged_min,
const float nudged_max, const float* input_data,
float* output_data, const float size);
// If x is approximately a power of two (with any positive or negative
// exponent), stores that exponent (i.e. log2(x)) in *log2_result, otherwise
// returns false.
bool CheckedLog2(const float x, int* log2_result);
// Decomposes an array of double multipliers into a Q0.31 int32 representation
// of its significand, and shift representation of its exponent.
//
// Handles an arbitrary multiplier. The 'shift' output-value is
// basically the 'floating-point exponent' of the multiplier:
// Negative for a right-shift (when the multiplier is <1), positive for a
// left-shift (when the multiplier is >1)
void QuantizeMultiplierArray(const double* effective_scales, size_t size,
int32_t* effective_scale_significand,
int* effective_shift);
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_QUANTIZATION_UTIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/quantization_util.h | C++ | apache-2.0 | 13,487 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
#include <type_traits>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Element-wise Add with an activation clamp, for inputs and output that all
// have the same number of elements (no broadcasting).
template <typename T>
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  // Activation min/max come from the (float or quantized) fields of params
  // appropriate for T.
  T activation_min, activation_max;
  GetActivationParams(params, &activation_min, &activation_max);
  // All three shapes are checked to agree on element count.
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    output_data[i] = ActivationFunctionWithMinMax(
        input1_data[i] + input2_data[i], activation_min, activation_max);
  }
}
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
// This function is used for 8-bit as well as for 16-bit, but the accumulator
// is 32-bit for both cases. The overflow does not happen due to the
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
// Quantized element-wise add over `size` elements: offsets each input,
// rescales both to a common scale, sums, rescales to the output scale, adds
// the output offset, and clamps to the quantized activation range.
template <typename T>
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const T* input1_data, const T* input2_data,
                           T* output_data) {
  // Offsets must lie strictly inside T's range so the offset-adjusted values
  // below stay well within int32.
  TFLITE_DCHECK_GT(params.input1_offset, -std::numeric_limits<T>::max());
  TFLITE_DCHECK_GT(params.input2_offset, -std::numeric_limits<T>::max());
  TFLITE_DCHECK_LT(params.input1_offset, std::numeric_limits<T>::max());
  TFLITE_DCHECK_LT(params.input2_offset, std::numeric_limits<T>::max());
  for (int i = 0; i < size; ++i) {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    // Pre-shift left to preserve precision through the per-input rescaling.
    const int32_t shifted_input1_val = input1_val * (1 << params.left_shift);
    const int32_t shifted_input2_val = input2_val * (1 << params.left_shift);
    // Bring both inputs to a common scale before summing.
    const int32_t scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, params.input1_multiplier, params.input1_shift);
    const int32_t scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, params.input2_multiplier, params.input2_shift);
    const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
    // Rescale the sum into the output's quantized domain.
    const int32_t raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            raw_sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<T>(clamped_output);
  }
}
// Scalar-broadcast add that can be used for inner loop of more general
// broadcast add, so that, for example, scalar-broadcast with batch will still
// be fast.
inline void AddScalarBroadcast(int size, const ArithmeticParams& params,
                               uint8_t input1_data, const uint8_t* input2_data,
                               uint8_t* output_data) {
  // uint8 zero points must fit in the open interval (-256, 256).
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  // The scalar operand is loop-invariant: offset, boost and rescale it once
  // up front instead of once per element.
  const int32_t scalar_val = params.input1_offset + input1_data;
  const int32_t scalar_boosted = scalar_val * (1 << params.left_shift);
  const int32_t scalar_rescaled =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          scalar_boosted, params.input1_multiplier, params.input1_shift);
  for (int idx = 0; idx < size; ++idx) {
    const int32_t elem_val = params.input2_offset + input2_data[idx];
    const int32_t elem_boosted = elem_val * (1 << params.left_shift);
    const int32_t elem_rescaled =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            elem_boosted, params.input2_multiplier, params.input2_shift);
    const int32_t sum = scalar_rescaled + elem_rescaled;
    // Requantize into the output scale/zero point, then clamp to the fused
    // activation range.
    const int32_t requantized =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            sum, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped =
        std::max(params.quantized_activation_min,
                 std::min(params.quantized_activation_max, requantized));
    output_data[idx] = static_cast<uint8_t>(clamped);
  }
}
// Quantized uint8 add over identically-shaped operands; defers the per-element
// arithmetic to AddElementwise.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8_t* input1_data,
                const RuntimeShape& input2_shape, const uint8_t* input2_data,
                const RuntimeShape& output_shape, uint8_t* output_data) {
  // uint8 zero points must fit in the open interval (-256, 256).
  TFLITE_DCHECK_GT(params.input1_offset, -256);
  TFLITE_DCHECK_GT(params.input2_offset, -256);
  TFLITE_DCHECK_LT(params.input1_offset, 256);
  TFLITE_DCHECK_LT(params.input2_offset, 256);
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}
// General-scale int16 add (non power-of-two rescaling): validates the
// quantization parameters then defers to the shared elementwise kernel.
inline void AddGeneralParamScale(const ArithmeticParams& params,
                                 const RuntimeShape& input1_shape,
                                 const int16_t* input1_data,
                                 const RuntimeShape& input2_shape,
                                 const int16_t* input2_data,
                                 const RuntimeShape& output_shape,
                                 int16_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // Zero points must lie strictly inside the int16 range.
  const int limit = std::numeric_limits<int16_t>::max();
  TFLITE_DCHECK_GT(params.input1_offset, -limit);
  TFLITE_DCHECK_GT(params.input2_offset, -limit);
  TFLITE_DCHECK_LT(params.input1_offset, limit);
  TFLITE_DCHECK_LT(params.input2_offset, limit);
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}
// Int16 add. In the default pot_scale (power-of-two scaling) mode, at most
// one of the two inputs needs to be right-shifted to match the other's scale
// (enforced by the DCHECKs below), and the sum saturates in Q0.15 fixed
// point. With pot_scale == false, falls back to the general-scale path.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16_t* input1_data,
                const RuntimeShape& input2_shape, const int16_t* input2_data,
                const RuntimeShape& output_shape, int16_t* output_data,
                bool pot_scale = true) {
  if (!pot_scale) {
    AddGeneralParamScale(params, input1_shape, input1_data, input2_shape,
                         input2_data, output_shape, output_data);
    return;
  }
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int input1_shift = params.input1_shift;
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  const int16_t output_activation_min = params.quantized_activation_min;
  const int16_t output_activation_max = params.quantized_activation_max;
  // Exactly one operand may require shifting; both shifts are right shifts
  // (non-positive values).
  TFLITE_DCHECK(input1_shift == 0 || params.input2_shift == 0);
  TFLITE_DCHECK_LE(input1_shift, 0);
  TFLITE_DCHECK_LE(params.input2_shift, 0);
  // Pick which operand already has the target scale (shift == 0) and how far
  // the other one must be shifted right to match it.
  const int16_t* not_shift_input =
      input1_shift == 0 ? input1_data : input2_data;
  const int16_t* shift_input = input1_shift == 0 ? input2_data : input1_data;
  const int input_right_shift =
      input1_shift == 0 ? -params.input2_shift : -input1_shift;
  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    F0 input_ready_scaled = F0::FromRaw(not_shift_input[i]);
    // Rounding right-shift brings the other operand to the same scale.
    F0 scaled_input = F0::FromRaw(
        gemmlowp::RoundingDivideByPOT(shift_input[i], input_right_shift));
    // Saturating add keeps the result within the Q0.15 range.
    F0 result = gemmlowp::SaturatingAdd(scaled_input, input_ready_scaled);
    const int16_t raw_output = result.raw();
    const int16_t clamped_output = std::min(
        output_activation_max, std::max(output_activation_min, raw_output));
    output_data[i] = clamped_output;
  }
}
// Broadcasting 4-D add for non-quantized types (selected when T is not a
// small integer). Inputs are broadcast against each other per the NdArrayDesc
// machinery; the fused activation clamp is applied to each sum.
template <typename T>
inline typename std::enable_if<!is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
                   const RuntimeShape& input1_shape, const T* input1_data,
                   const RuntimeShape& input2_shape, const T* input2_data,
                   const RuntimeShape& output_shape, T* output_data) {
  NdArrayDesc<4> input1_desc;
  NdArrayDesc<4> input2_desc;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &input1_desc,
                                      &input2_desc);
  const RuntimeShape out_shape = RuntimeShape::ExtendedShape(4, output_shape);
  T activation_min, activation_max;
  GetActivationParams(params, &activation_min, &activation_max);
  // TensorFlow canonically names the dimensions (batch, row, col, channel)
  // with the trailing (channel) dimension changing most rapidly in memory.
  // The loop nest below therefore keeps the channel loop innermost so the
  // smallest stride is traversed in the inner loop for good cache behavior.
  const int batches = out_shape.Dims(0);
  const int height = out_shape.Dims(1);
  const int width = out_shape.Dims(2);
  const int channels = out_shape.Dims(3);
  for (int batch = 0; batch < batches; ++batch) {
    for (int row = 0; row < height; ++row) {
      for (int col = 0; col < width; ++col) {
        for (int ch = 0; ch < channels; ++ch) {
          const T sum =
              input1_data[SubscriptToIndex(input1_desc, batch, row, col, ch)] +
              input2_data[SubscriptToIndex(input2_desc, batch, row, col, ch)];
          output_data[Offset(out_shape, batch, row, col, ch)] =
              ActivationFunctionWithMinMax<T>(sum, activation_min,
                                              activation_max);
        }
      }
    }
  }
}
// This function is used for 8-bit as well as for 16-bit, but the accumulator
// is 32-bit for both cases. The overflow does not happen due to the
// choice of the shift (20 or 15, accordingly - see add.cc for more comments).
//
// Broadcasting 4-D add for quantized (small-integer) types. Applies the same
// offset / left-shift / rescale / requantize / clamp sequence as
// AddElementwise, but with broadcast-aware indexing of both inputs.
template <typename T>
inline typename std::enable_if<is_small_integer<T>::value, void>::type
BroadcastAdd4DSlow(const ArithmeticParams& params,
                   const RuntimeShape& input1_shape, const T* input1_data,
                   const RuntimeShape& input2_shape, const T* input2_data,
                   const RuntimeShape& output_shape, T* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);
  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          // Center each input on zero, then boost by left_shift so the
          // fixed-point rescale keeps enough precision.
          const int32_t input1_val =
              params.input1_offset +
              input1_data[SubscriptToIndex(desc1, b, y, x, c)];
          const int32_t input2_val =
              params.input2_offset +
              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
          const int32_t shifted_input1_val =
              input1_val * (1 << params.left_shift);
          const int32_t shifted_input2_val =
              input2_val * (1 << params.left_shift);
          const int32_t scaled_input1_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input1_val, params.input1_multiplier,
                  params.input1_shift);
          const int32_t scaled_input2_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input2_val, params.input2_multiplier,
                  params.input2_shift);
          const int32_t raw_sum = scaled_input1_val + scaled_input2_val;
          // Requantize into the output scale/zero point, then clamp to the
          // fused activation range.
          const int32_t raw_output =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  raw_sum, params.output_multiplier, params.output_shift) +
              params.output_offset;
          const int32_t clamped_output =
              std::min(params.quantized_activation_max,
                       std::max(params.quantized_activation_min, raw_output));
          output_data[Offset(extended_output_shape, b, y, x, c)] =
              static_cast<T>(clamped_output);
        }
      }
    }
  }
}
// Broadcast uint8 add specialized for the "fivefold" broadcast pattern, where
// the two input shapes decompose into five interleaved runs (y0..y4, stored in
// params.broadcast_shape) of broadcast and non-broadcast dimensions. If the
// FIRST input is the fast-broadcasting one, operands and their quantization
// params are swapped so the loop nest below only has to handle one layout
// (addition commutes, so the result is unchanged).
inline void BroadcastAddFivefold(const ArithmeticParams& unswitched_params,
                                 const RuntimeShape& unswitched_input1_shape,
                                 const uint8_t* unswitched_input1_data,
                                 const RuntimeShape& unswitched_input2_shape,
                                 const uint8_t* unswitched_input2_data,
                                 const RuntimeShape& output_shape,
                                 uint8_t* output_data) {
  // Build a params copy with input1/input2 quantization roles exchanged, to
  // pair with the swapped data pointers below.
  ArithmeticParams switched_params = unswitched_params;
  switched_params.input1_offset = unswitched_params.input2_offset;
  switched_params.input1_multiplier = unswitched_params.input2_multiplier;
  switched_params.input1_shift = unswitched_params.input2_shift;
  switched_params.input2_offset = unswitched_params.input1_offset;
  switched_params.input2_multiplier = unswitched_params.input1_multiplier;
  switched_params.input2_shift = unswitched_params.input1_shift;
  const bool use_unswitched =
      unswitched_params.broadcast_category ==
      tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
  const ArithmeticParams& params =
      use_unswitched ? unswitched_params : switched_params;
  const uint8_t* input1_data =
      use_unswitched ? unswitched_input1_data : unswitched_input2_data;
  const uint8_t* input2_data =
      use_unswitched ? unswitched_input2_data : unswitched_input1_data;
  // Fivefold nested loops. The second input resets its position for each
  // iteration of the second loop. The first input resets its position at the
  // beginning of the fourth loop. The innermost loop is an elementwise add of
  // sections of the arrays.
  uint8_t* output_data_ptr = output_data;
  const uint8_t* input1_data_ptr = input1_data;
  const uint8_t* input2_data_reset = input2_data;
  // In the fivefold pattern, y0, y2 and y4 are not broadcast, and so shared
  // between input shapes. y3 for input 1 is always broadcast, and so the
  // dimension there is 1, whereas optionally y1 might be broadcast for input 2.
  // Put another way,
  // input1.shape.FlatSize = y0 * y1 * y2 * y4,
  // input2.shape.FlatSize = y0 * y2 * y3 * y4.
  int y0 = params.broadcast_shape[0];
  int y1 = params.broadcast_shape[1];
  int y2 = params.broadcast_shape[2];
  int y3 = params.broadcast_shape[3];
  int y4 = params.broadcast_shape[4];
  if (y4 > 1) {
    // General fivefold pattern, with y4 > 1 so there is a non-broadcast inner
    // dimension.
    for (int i0 = 0; i0 < y0; ++i0) {
      const uint8_t* input2_data_ptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          for (int i3 = 0; i3 < y3; ++i3) {
            // Inner run of y4 contiguous elements from both inputs.
            AddElementwise(y4, params, input1_data_ptr, input2_data_ptr,
                           output_data_ptr);
            input2_data_ptr += y4;
            output_data_ptr += y4;
          }
          // We have broadcast y4 of input1 data y3 times, and now move on.
          input1_data_ptr += y4;
        }
      }
      // We have broadcast y2*y3*y4 of input2 data y1 times, and now move on.
      input2_data_reset = input2_data_ptr;
    }
  } else {
    // Special case of y4 == 1, in which the innermost loop is a single element
    // and can be combined with the next (y3) as an inner broadcast.
    //
    // Note that this handles the case of pure scalar broadcast when
    // y0 == y1 == y2 == 1. With low overhead it handles cases such as scalar
    // broadcast with batch (as y2 > 1).
    //
    // NOTE The process is the same as the above general case except simplified
    // for y4 == 1 and the loop over y3 is contained within the
    // AddScalarBroadcast function.
    for (int i0 = 0; i0 < y0; ++i0) {
      const uint8_t* input2_data_ptr;
      for (int i1 = 0; i1 < y1; ++i1) {
        input2_data_ptr = input2_data_reset;
        for (int i2 = 0; i2 < y2; ++i2) {
          // One scalar from input1 is added to y3 elements of input2.
          AddScalarBroadcast(y3, params, *input1_data_ptr, input2_data_ptr,
                             output_data_ptr);
          input2_data_ptr += y3;
          output_data_ptr += y3;
          input1_data_ptr += 1;
        }
      }
      input2_data_reset = input2_data_ptr;
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/add.h | C++ | apache-2.0 | 18,331 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// T is expected to be either float or int.
template <typename T>
inline void AddN(const RuntimeShape& input_shape, const size_t num_inputs,
                 const T* const* input_data, T* output_data) {
  // All inputs and the output share one shape (validated during Prepare), so
  // a flat element-wise walk over the tensors is sufficient.
  const size_t num_elements = input_shape.FlatSize();
  for (size_t elem = 0; elem < num_elements; ++elem) {
    T accum = 0;
    for (size_t input = 0; input < num_inputs; ++input) {
      accum += input_data[input][elem];
    }
    output_data[elem] = accum;
  }
}
// Quantized int8 AddN. All inputs must share the same zero point and scale
// (checked during Prepare), so params.input1_* is used for every operand.
// Each operand is rescaled into the common accumulation domain and summed in
// 32 bits before a single requantization to the output scale.
inline void AddN(const ArithmeticParams& params,
                 const RuntimeShape& input_shape, const size_t num_inputs,
                 const int8_t* const* input_data, int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // Input offset is negative input zero point. Activation tensors are
  // asymmetric quantized so they span the full int8 range.
  // All inputs should have same zero-point and scale, this is checked during
  // Prepare stage.
  TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
  // All inputs and output should have the same shape, this is checked during
  // Prepare stage.
  const size_t size = input_shape.FlatSize();
  for (size_t i = 0; i < size; ++i) {
    // accumulate in scaled_x before clamping to avoid overflow
    // Seed the accumulator with the scaled representation of zero so that
    // adding num_inputs scaled operands lands at the correct total.
    const int32_t x = params.input1_offset;  // x = 0
    const int32_t shifted_x = x * (1 << params.left_shift);
    int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
        shifted_x, params.input1_multiplier, params.input1_shift);
    for (size_t j = 0; j < num_inputs; ++j) {
      // Same offset/boost/rescale sequence as the binary quantized add.
      const int32_t y = params.input1_offset + input_data[j][i];
      const int32_t shifted_y = y * (1 << params.left_shift);
      int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          shifted_y, params.input1_multiplier, params.input1_shift);
      scaled_x += scaled_y;
    }
    // Requantize the accumulated sum into the output scale/zero point and
    // clamp to the fused activation range.
    const int32_t raw_output =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            scaled_x, params.output_multiplier, params.output_shift) +
        params.output_offset;
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, raw_output));
    output_data[i] = static_cast<int8_t>(clamped_output);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ADD_N_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/add_n.h | C++ | apache-2.0 | 3,508 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Returns the comparison predicate for the reduction: strictly-greater for
// arg-max (keep the largest value), strictly-less for arg-min (keep the
// smallest). Strict comparison means ties resolve to the earliest index.
template <typename T>
std::function<bool(T, T)> GetComparefunction(bool is_arg_max) {
  return is_arg_max ? std::function<bool(T, T)>(std::greater<T>())
                    : std::function<bool(T, T)>(std::less<T>());
}
// Reduces input1_data along the axis given by input2_data[0], writing for
// every outer/inner position the index of the winning element according to
// `cmp` (ties resolve to the lowest index, since element 0 seeds the scan).
template <typename T1, typename T2, typename T3, typename Cmp>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
               const T3* input2_data, const RuntimeShape& output_shape,
               T2* output_data, const Cmp& cmp) {
  TFLITE_DCHECK_GT(input1_shape.DimensionsCount(), 0);
  TFLITE_DCHECK_EQ(input1_shape.DimensionsCount() - 1,
                   output_shape.DimensionsCount());
  // Negative axes count backwards from the last dimension.
  int axis = input2_data[0];
  if (axis < 0) {
    axis += input1_shape.DimensionsCount();
  }
  const int axis_size = input1_shape.Dims(axis);
  // Collapse the dimensions before the axis into outer_size and those after
  // it into inner_size, checking the output shape along the way.
  int outer_size = 1;
  for (int d = 0; d < axis; ++d) {
    TFLITE_DCHECK_EQ(input1_shape.Dims(d), output_shape.Dims(d));
    outer_size *= input1_shape.Dims(d);
  }
  int inner_size = 1;
  const int num_dims = input1_shape.DimensionsCount();
  for (int d = axis + 1; d < num_dims; ++d) {
    TFLITE_DCHECK_EQ(input1_shape.Dims(d), output_shape.Dims(d - 1));
    inner_size *= input1_shape.Dims(d);
  }
  for (int outer = 0; outer < outer_size; ++outer) {
    for (int inner = 0; inner < inner_size; ++inner) {
      // Element 0 of the axis is the initial winner; scan the rest.
      auto best_value = input1_data[outer * axis_size * inner_size + inner];
      T2 best_index = 0;
      for (int pos = 1; pos < axis_size; ++pos) {
        const auto& candidate =
            input1_data[(outer * axis_size + pos) * inner_size + inner];
        if (cmp(candidate, best_value)) {
          best_value = candidate;
          best_index = static_cast<T2>(pos);
        }
      }
      output_data[outer * inner_size + inner] = best_index;
    }
  }
}
// Convenience overload: resolves the comparison direction from is_arg_max and
// forwards to the generic comparator-based overload above.
template <typename T1, typename T2, typename T3>
void ArgMinMax(const RuntimeShape& input1_shape, const T1* input1_data,
               const T3* input2_data, const RuntimeShape& output_shape,
               T2* output_data, const bool is_arg_max) {
  const auto compare = GetComparefunction<T1>(is_arg_max);
  ArgMinMax(input1_shape, input1_data, input2_data, output_shape, output_data,
            compare);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ARG_MIN_MAX_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/arg_min_max.h | C++ | apache-2.0 | 3,171 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
#include <algorithm>
#include <cstdint>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/tensor_utils_common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
namespace batch_matmul {
// Determine which dimension is the broadcast dimension.
// Returns the common extent of two (possibly broadcast) dimensions: equal
// extents pass through unchanged; otherwise one side must be 1 and the other
// side's extent wins.
inline int broadcast_dim(int lhs_dim, int rhs_dim) {
  if (lhs_dim == rhs_dim) {
    return lhs_dim;
  }
  if (lhs_dim == 1) {
    return rhs_dim;
  }
  TFLITE_DCHECK_EQ(rhs_dim, 1);
  return lhs_dim;
}
// Compute the "extent" for iterating on this dimension.
// If we are broadcasting, then don't advance (i.e return 0).
inline int extent(const RuntimeShape& shape, int x) {
  // A size-1 dimension is broadcast: its effective stride is zero.
  if (shape.Dims(x) == 1) {
    return 0;
  }
  // Otherwise the stride is the product of all trailing dimension extents.
  int stride = 1;
  const int num_dims = shape.DimensionsCount();
  for (int d = x + 1; d < num_dims; ++d) {
    stride *= shape.Dims(d);
  }
  return stride;
}
} // namespace batch_matmul
// Plain batched matrix multiply. Both operands are extended to rank 5: the
// three leading dimensions are batch dimensions that may broadcast against
// each other (size-1 dims repeat, via extent() == 0); the last two are the
// matrix dimensions. Accumulation is done in Tout.
// NOTE(review): the indexing rhs_ptr2[j * accum_depth + k] treats the RHS as
// stored transposed (each output column's accumulation run is contiguous) --
// presumably the calling kernel transposes the RHS first; confirm against the
// caller.
template <typename Ta, typename Tb, typename Tout>
inline void BatchMatMul(const RuntimeShape& lhs_shape, const Ta* lhs_data,
                        const RuntimeShape& rhs_shape, const Tb* rhs_data,
                        const RuntimeShape& output_shape, Tout* output_data) {
  const RuntimeShape extended_lhs_shape =
      RuntimeShape::ExtendedShape(5, lhs_shape);
  const RuntimeShape extended_rhs_shape =
      RuntimeShape::ExtendedShape(5, rhs_shape);
  // Per-batch-dim broadcast extents of the output.
  const int batch_dim0 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
  const int batch_dim1 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
  const int batch_dim2 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
  // Per-operand strides for each batch dim (0 when that dim broadcasts).
  const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
  const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
  const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
  const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
  const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
  const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
  // Set params for each matrix multiply.
  const int lhs_rows = extended_lhs_shape.Dims(3);
  const int rhs_cols = extended_rhs_shape.Dims(4);
  const int accum_depth = extended_lhs_shape.Dims(4);
  for (int b0 = 0; b0 < batch_dim0; ++b0) {
    const Ta* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
    const Tb* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
    for (int b1 = 0; b1 < batch_dim1; ++b1) {
      const Ta* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
      const Tb* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
      for (int b2 = 0; b2 < batch_dim2; ++b2) {
        const Ta* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
        const Tb* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
        // Output batches are dense (never broadcast), hence the direct
        // linearized offset.
        Tout* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
                                       b1 * batch_dim2 + b2) *
                                          lhs_rows * rhs_cols;
        for (int j = 0; j < rhs_cols; ++j) {
          for (int i = 0; i < lhs_rows; ++i) {
            Tout total = 0;
            for (int k = 0; k < accum_depth; ++k) {
              total += static_cast<Tout>(lhs_ptr2[accum_depth * i + k]) *
                       static_cast<Tout>(rhs_ptr2[j * accum_depth + k]);
            }
            // Output matrix is stored column-major within each batch.
            int idx = lhs_rows * j + i;
            out_ptr[idx] = total;
          }
        }
      }
    }
  }
}
// Hybrid batched matmul: int8 operands with per-column float scaling_factors
// and input_offset (zero points), producing float output. row_sums caches the
// per-row sums of lhs used for the zero-point correction; it is recomputed
// when compute_row_sums is null or *compute_row_sums is true, and the flag is
// then cleared so subsequent calls reuse the cache.
// NOTE(review): output is ACCUMULATED into (out_ptr[idx] += ...), so the
// caller presumably zero-initializes output_data first -- confirm.
inline void BatchMatMul(const RuntimeShape& lhs_shape, const int8_t* lhs_data,
                        const RuntimeShape& rhs_shape, const int8_t* rhs_data,
                        const float* scaling_factors,
                        const int32_t* input_offset, int32_t* row_sums,
                        const RuntimeShape& output_shape, float* output_data,
                        bool* compute_row_sums) {
  const RuntimeShape extended_lhs_shape =
      RuntimeShape::ExtendedShape(5, lhs_shape);
  const RuntimeShape extended_rhs_shape =
      RuntimeShape::ExtendedShape(5, rhs_shape);
  // Broadcast extents and per-operand strides for the three batch dims.
  const int batch_dim0 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
  const int batch_dim1 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
  const int batch_dim2 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
  const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
  const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
  const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
  const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
  const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
  const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
  // Set params for each matrix multiply.
  const int lhs_rows = extended_lhs_shape.Dims(3);
  const int rhs_cols = extended_rhs_shape.Dims(4);
  const int accum_depth = extended_lhs_shape.Dims(4);
  // Strides into the per-column offset/scale arrays and the per-row sums,
  // mirroring the broadcast pattern of the corresponding operand (0 when that
  // batch dim broadcasts).
  const int ioff_ext0 = rhs_ext0 == 0 ? 0 : rhs_cols;
  const int ioff_ext1 = rhs_ext1 == 0 ? 0 : rhs_cols;
  const int ioff_ext2 = rhs_ext2 == 0 ? 0 : rhs_cols;
  const int woff_ext0 = lhs_ext0 == 0 ? 0 : lhs_rows;
  const int woff_ext1 = lhs_ext1 == 0 ? 0 : lhs_rows;
  const int woff_ext2 = lhs_ext2 == 0 ? 0 : lhs_rows;
  if (!compute_row_sums || *compute_row_sums) {
    // (Re)build the cached per-row sums of every lhs weight matrix.
    int num_weights_matrices = 1;
    for (int i = 1; i < extended_lhs_shape.DimensionsCount() - 2; ++i) {
      num_weights_matrices *= extended_lhs_shape.Dims(i);
    }
    tensor_utils::ReductionSumVector(
        lhs_data, row_sums, num_weights_matrices * lhs_rows, accum_depth);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }
  for (int b0 = 0; b0 < batch_dim0; ++b0) {
    const int8_t* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
    const int8_t* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
    const int32_t* ioff_ptr0 = input_offset + (b0 * ioff_ext0);
    const float* scale_ptr0 = scaling_factors + (b0 * ioff_ext0);
    const int32_t* woff_ptr0 = row_sums + (b0 * woff_ext0);
    for (int b1 = 0; b1 < batch_dim1; ++b1) {
      const int8_t* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
      const int8_t* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
      const int32_t* ioff_ptr1 = ioff_ptr0 + (b1 * ioff_ext1);
      const float* scale_ptr1 = scale_ptr0 + (b1 * ioff_ext1);
      const int32_t* woff_ptr1 = woff_ptr0 + (b1 * woff_ext1);
      for (int b2 = 0; b2 < batch_dim2; ++b2) {
        const int8_t* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
        const int8_t* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
        const int32_t* ioff_ptr2 = ioff_ptr1 + (b2 * ioff_ext2);
        const float* scale_ptr2 = scale_ptr1 + (b2 * ioff_ext2);
        const int32_t* woff_ptr2 = woff_ptr1 + (b2 * woff_ext2);
        float* out_ptr = output_data + ((b0 * batch_dim1 * batch_dim2) +
                                        b1 * batch_dim2 + b2) *
                                           lhs_rows * rhs_cols;
        for (int j = 0; j < rhs_cols; ++j) {
          const float batch_scaling_factor = scale_ptr2[j];
          const float batch_offset = static_cast<float>(ioff_ptr2[j]);
          for (int i = 0; i < lhs_rows; ++i) {
            // Raw int8 dot product over the accumulation depth.
            int32_t total = 0;
            for (int k = 0; k < accum_depth; ++k) {
              total +=
                  lhs_ptr2[accum_depth * i + k] * rhs_ptr2[j * accum_depth + k];
            }
            // Zero-point correction: subtract (sum of lhs row) * rhs offset,
            // then scale into float and accumulate into the output.
            int32_t row_sum = woff_ptr2[i];
            total -= row_sum * batch_offset;
            int idx = lhs_rows * j + i;
            out_ptr[idx] += batch_scaling_factor * total;
          }
        }
      }
    }
  }
}
// Fully quantized batched matmul (e.g. T = int8_t/int16_t with a wider
// AccumT). Zero points from `params` are added to both operands before the
// AccumT dot product; the result is requantized with the output multiplier /
// shift, re-centered on the output zero point, and clamped to the fused
// activation range.
// NOTE(review): rhs_ptr2[accum_depth * j + k] assumes the RHS is stored
// transposed, as in the other BatchMatMul overloads -- confirm with caller.
template <typename T, typename AccumT>
inline void BatchMatMul(const FullyConnectedParams& params,
                        const RuntimeShape& lhs_shape, const T* lhs_data,
                        const RuntimeShape& rhs_shape, const T* rhs_data,
                        const RuntimeShape& output_shape, T* output_data) {
  const RuntimeShape extended_lhs_shape =
      RuntimeShape::ExtendedShape(5, lhs_shape);
  const RuntimeShape extended_rhs_shape =
      RuntimeShape::ExtendedShape(5, rhs_shape);
  // Broadcast extents and per-operand strides for the three batch dims.
  const int batch_dim0 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(0), extended_rhs_shape.Dims(0));
  const int batch_dim1 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(1), extended_rhs_shape.Dims(1));
  const int batch_dim2 = batch_matmul::broadcast_dim(
      extended_lhs_shape.Dims(2), extended_rhs_shape.Dims(2));
  const int lhs_ext0 = batch_matmul::extent(extended_lhs_shape, 0);
  const int lhs_ext1 = batch_matmul::extent(extended_lhs_shape, 1);
  const int lhs_ext2 = batch_matmul::extent(extended_lhs_shape, 2);
  const int rhs_ext0 = batch_matmul::extent(extended_rhs_shape, 0);
  const int rhs_ext1 = batch_matmul::extent(extended_rhs_shape, 1);
  const int rhs_ext2 = batch_matmul::extent(extended_rhs_shape, 2);
  // Set params for each matrix multiply.
  const int lhs_rows = extended_lhs_shape.Dims(3);
  const int rhs_cols = extended_rhs_shape.Dims(4);
  const int accum_depth = extended_lhs_shape.Dims(4);
  const int32_t input_offset = params.input_offset;
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  for (int b0 = 0; b0 < batch_dim0; ++b0) {
    const T* lhs_ptr0 = lhs_data + (b0 * lhs_ext0);
    const T* rhs_ptr0 = rhs_data + (b0 * rhs_ext0);
    for (int b1 = 0; b1 < batch_dim1; ++b1) {
      const T* lhs_ptr1 = lhs_ptr0 + b1 * lhs_ext1;
      const T* rhs_ptr1 = rhs_ptr0 + b1 * rhs_ext1;
      for (int b2 = 0; b2 < batch_dim2; ++b2) {
        const T* lhs_ptr2 = lhs_ptr1 + b2 * lhs_ext2;
        const T* rhs_ptr2 = rhs_ptr1 + b2 * rhs_ext2;
        T* out_ptr = output_data +
                     ((b0 * batch_dim1 * batch_dim2) + b1 * batch_dim2 + b2) *
                         lhs_rows * rhs_cols;
        for (int j = 0; j < rhs_cols; ++j) {
          for (int i = 0; i < lhs_rows; ++i) {
            // Offset-corrected dot product accumulated in AccumT.
            AccumT total = 0;
            for (int k = 0; k < accum_depth; ++k) {
              AccumT lhs_val = lhs_ptr2[accum_depth * i + k];
              AccumT rhs_val = rhs_ptr2[accum_depth * j + k];
              total += (lhs_val + filter_offset) * (rhs_val + input_offset);
            }
            // Requantize, re-center on the output zero point, and clamp.
            int32_t total_scaled = MultiplyByQuantizedMultiplier(
                total, output_multiplier, output_shift);
            total_scaled += output_offset;
            total_scaled = std::max(total_scaled, output_activation_min);
            total_scaled = std::min(total_scaled, output_activation_max);
            // Output matrix is stored column-major within each batch.
            const int idx = lhs_rows * j + i;
            out_ptr[idx] = static_cast<T>(total_scaled);
          }
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_MATMUL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/batch_matmul.h | C++ | apache-2.0 | 12,027 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
#include <cmath>
#include <cstring>

#include "ruy/profiler/instrumentation.h"  // from @ruy
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// TODO(b/135760455): Move this method anonymous namespace in a cc file.
// Promotes a 3-D (batch, spatial, depth) shape to 4-D by inserting a
// singleton width dimension; 4-D shapes pass through unchanged.
inline RuntimeShape ExtendShapeBatchToSpace(const RuntimeShape& shape) {
  if (shape.DimensionsCount() == 4) {
    return shape;
  }
  RuntimeShape extended(4, 1);
  extended.SetDim(0, shape.Dims(0));
  extended.SetDim(1, shape.Dims(1));
  extended.SetDim(3, shape.Dims(2));
  return extended;
}
// BatchToSpaceND: moves data from the batch dimension back into spatial
// blocks and then crops the result. Handles 3-D (batch, spatial, depth) and
// 4-D (NHWC) tensors; 3-D inputs are promoted to 4-D with width == 1, in
// which case only a single block dimension and a single crop pair apply.
// Positions that fall inside the crop margins are skipped.
template <typename T>
inline void BatchToSpaceND(const RuntimeShape& unextended_input1_shape,
                           const T* input1_data,
                           const RuntimeShape& unextended_input2_shape,
                           const int32_t* block_shape_data,
                           const RuntimeShape& unextended_input3_shape,
                           const int32_t* crops_data,
                           const RuntimeShape& unextended_output_shape,
                           T* output_data) {
  ruy::profiler::ScopeLabel label("BatchToSpaceND");
  TFLITE_DCHECK_GE(unextended_input1_shape.DimensionsCount(), 3);
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(unextended_input1_shape.DimensionsCount(),
                   unextended_output_shape.DimensionsCount());
  const RuntimeShape input1_shape =
      ExtendShapeBatchToSpace(unextended_input1_shape);
  const RuntimeShape output_shape =
      ExtendShapeBatchToSpace(unextended_output_shape);
  const int output_width = output_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_batch_size = output_shape.Dims(0);
  const int depth = input1_shape.Dims(3);
  const int input_width = input1_shape.Dims(2);
  const int input_height = input1_shape.Dims(1);
  const int input_batch_size = input1_shape.Dims(0);
  // For 3-D inputs there is a single block dimension (height) and a single
  // crop pair; the width block/crop default to the identity.
  const int block_shape_height = block_shape_data[0];
  const int block_shape_width =
      unextended_input1_shape.DimensionsCount() == 4 ? block_shape_data[1] : 1;
  const int crops_top = crops_data[0];
  const int crops_left =
      unextended_input1_shape.DimensionsCount() == 4 ? crops_data[2] : 0;
  for (int in_batch = 0; in_batch < input_batch_size; ++in_batch) {
    // Each input batch maps to one output batch plus an offset within the
    // block grid.
    const int out_batch = in_batch % output_batch_size;
    const int spatial_offset = in_batch / output_batch_size;
    for (int in_h = 0; in_h < input_height; ++in_h) {
      const int out_h = in_h * block_shape_height +
                        spatial_offset / block_shape_width - crops_top;
      if (out_h < 0 || out_h >= output_height) {
        continue;  // Row falls inside the cropped margin.
      }
      for (int in_w = 0; in_w < input_width; ++in_w) {
        const int out_w = in_w * block_shape_width +
                          spatial_offset % block_shape_width - crops_left;
        if (out_w < 0 || out_w >= output_width) {
          continue;  // Column falls inside the cropped margin.
        }
        T* out = output_data + Offset(output_shape, out_batch, out_h, out_w, 0);
        const T* in =
            input1_data + Offset(input1_shape, in_batch, in_h, in_w, 0);
        // Copy the whole depth vector at once. Qualified std::memcpy: the
        // original relied on a transitive <cstring> include for bare memcpy.
        std::memcpy(out, in, depth * sizeof(T));
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BATCH_TO_SPACE_ND_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/batch_to_space_nd.h | C++ | apache-2.0 | 4,099 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Also appears to duplicate MinimumMaximum.
//
// R: Result type. T1: Input 1 type. T2: Input 2 type.
template <typename R, typename T1, typename T2>
inline void BroadcastBinaryFunction4DSlow(
const RuntimeShape& unextended_input1_shape, const T1* input1_data,
const RuntimeShape& unextended_input2_shape, const T2* input2_data,
const RuntimeShape& unextended_output_shape, R* output_data,
R (*func)(T1, T2)) {
TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
const RuntimeShape output_shape =
RuntimeShape::ExtendedShape(4, unextended_output_shape);
NdArrayDesc<4> desc1;
NdArrayDesc<4> desc2;
NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
unextended_input2_shape, &desc1, &desc2);
for (int b = 0; b < output_shape.Dims(0); ++b) {
for (int y = 0; y < output_shape.Dims(1); ++y) {
for (int x = 0; x < output_shape.Dims(2); ++x) {
for (int c = 0; c < output_shape.Dims(3); ++c) {
auto out_idx = Offset(output_shape, b, y, x, c);
auto in1_idx = SubscriptToIndex(desc1, b, y, x, c);
auto in2_idx = SubscriptToIndex(desc2, b, y, x, c);
auto in1_val = input1_data[in1_idx];
auto in2_val = input2_data[in2_idx];
output_data[out_idx] = func(in1_val, in2_val);
}
}
}
}
}
// Applies `func` element-wise over two inputs of identical shape; no
// broadcasting (shapes are checked to match via MatchingFlatSize).
//
// R: Result type. T1: Input 1 type. T2: Input 2 type.
template <typename R, typename T1, typename T2>
inline void BinaryFunction(const RuntimeShape& input1_shape,
                           const T1* input1_data,
                           const RuntimeShape& input2_shape,
                           const T2* input2_data,
                           const RuntimeShape& output_shape, R* output_data,
                           R (*func)(T1, T2)) {
  const int num_elements =
      MatchingFlatSize(input1_shape, input2_shape, output_shape);
  for (int index = 0; index < num_elements; ++index) {
    output_data[index] = func(input1_data[index], input2_data[index]);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BINARY_FUNCTION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/binary_function.h | C++ | apache-2.0 | 3,295 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"
namespace tflite {
namespace reference_ops {
// Recursive helper for BroadcastTo. Walks the dimensions from outermost
// (dim == 0) inwards, filling `output_data` from `input_data`. Data is
// handled as raw bytes (char*), `type_size` bytes per element. `indexes`
// holds the subscript currently being processed; `last_broadcasting_dim`
// is the innermost dimension whose input and output extents differ.
template <int N>
void BroadcastImpl(const NdArrayDesc<N>& input_desc, const char* input_data,
                   const NdArrayDesc<N>& output_desc, char* output_data,
                   int indexes[N], int dim, const int last_broadcasting_dim,
                   const int type_size) {
  // Copy data from input to output.
  if (dim == last_broadcasting_dim) {
    // Innermost broadcast dimension: replicate one input slice
    // output_desc.extents[dim] times with memcpy.
    int copy_size = output_desc.strides[dim] * type_size;
    const char* data_src =
        input_data + SubscriptToIndex(input_desc, indexes) * type_size;
    char* data_dst =
        output_data + SubscriptToIndex(output_desc, indexes) * type_size;
    for (int i = 0; i < output_desc.extents[dim]; ++i, data_dst += copy_size) {
      memcpy(data_dst, data_src, copy_size);
    }
    return;
  }

  // Recursive call to find the next broadcasting.
  for (indexes[dim] = 0; indexes[dim] < input_desc.extents[dim];
       ++indexes[dim]) {
    BroadcastImpl<N>(input_desc, input_data, output_desc, output_data, indexes,
                     dim + 1, last_broadcasting_dim, type_size);
  }

  // Duplicate data in output tensor.
  indexes[dim] = 0;
  if (input_desc.extents[dim] != output_desc.extents[dim]) {
    // This dimension broadcasts too: the inner region written above is
    // duplicated in place to fill the remaining output extent.
    int copy_size = output_desc.strides[dim] * type_size;
    char* data_src =
        output_data + SubscriptToIndex(output_desc, indexes) * type_size;
    char* data_dst = data_src + copy_size;
    for (int i = 1; i < output_desc.extents[dim]; ++i, data_dst += copy_size) {
      memcpy(data_dst, data_src, copy_size);
    }
  }
}
// Broadcasts `input_data` into `output_data` (numpy-style), operating on
// raw bytes so any element type can be handled via its TfLiteType size.
// N is the compile-time rank used for the extended shape descriptors.
template <int N>
inline void BroadcastTo(const RuntimeShape& unextended_input_shape,
                        const char* input_data,
                        const RuntimeShape& unextended_output_shape,
                        char* output_data, TfLiteType data_type) {
  NdArrayDesc<N> input_desc;
  NdArrayDesc<N> output_desc;
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_input_shape),
                 &input_desc);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);

  // Get the last dimension has broadcasting. At this dimension, the data is
  // copied from input tensor to output tensor.
  int last_broadcast_dim = -1;
  for (int i = N - 1; i >= 0; --i) {
    if (input_desc.extents[i] != output_desc.extents[i]) {
      last_broadcast_dim = i;
      break;
    }
  }

  // If non-broadcasting, just copy data from input to output tensor.
  if (last_broadcast_dim == -1) {
    memcpy(output_data, input_data,
           unextended_input_shape.FlatSize() * TfLiteTypeGetSize(data_type));
    return;
  }

  // Broadcasting using memcpy.
  int indexes[N] = {0};
  BroadcastImpl<N>(input_desc, input_data, output_desc, output_data, indexes, 0,
                   last_broadcast_dim, TfLiteTypeGetSize(data_type));
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_BROADCAST_TO_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/broadcast_to.h | C++ | apache-2.0 | 3,869 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CAST_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CAST_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise static_cast from SrcT to DstT over matching flat shapes.
template <typename SrcT, typename DstT>
inline void Cast(const RuntimeShape& input_shape, const SrcT* input_data,
                 const RuntimeShape& output_shape, DstT* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int index = 0; index < num_elements; ++index) {
    output_data[index] = static_cast<DstT>(input_data[index]);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CAST_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/cast.h | C++ | apache-2.0 | 1,484 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise std::ceil over a float tensor with matching in/out shapes.
inline void Ceil(const RuntimeShape& input_shape, const float* input_data,
                 const RuntimeShape& output_shape, float* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int index = 0; index < num_elements; ++index) {
    output_data[index] = std::ceil(input_data[index]);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CEIL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/ceil.h | C++ | apache-2.0 | 1,333 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise comparison predicates. They are plugged in as the `F`
// template argument of the Comparison* helpers below.
template <typename T>
inline bool EqualFn(T lhs, T rhs) {
  return lhs == rhs;
}
template <typename T>
inline bool NotEqualFn(T lhs, T rhs) {
  return lhs != rhs;
}
template <typename T>
inline bool GreaterFn(T lhs, T rhs) {
  return lhs > rhs;
}
template <typename T>
inline bool GreaterEqualFn(T lhs, T rhs) {
  return lhs >= rhs;
}
template <typename T>
inline bool LessFn(T lhs, T rhs) {
  return lhs < rhs;
}
template <typename T>
inline bool LessEqualFn(T lhs, T rhs) {
  return lhs <= rhs;
}

// Pointer-to-function signature shared by all predicates above.
template <typename T>
using ComparisonFn = bool (*)(T, T);
// Applies predicate F element-wise over two same-shaped inputs, writing
// one bool per element. `op_params` is unused in this unscaled variant but
// kept so all Comparison* helpers share a calling convention.
template <typename T, ComparisonFn<T> F>
inline void ComparisonImpl(
    const ComparisonParams& op_params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
  const int64_t num_elements =
      MatchingFlatSize(input1_shape, input2_shape, output_shape);
  for (int64_t index = 0; index < num_elements; ++index) {
    output_data[index] = F(input1_data[index], input2_data[index]);
  }
}
// Float entry point: thin forwarding wrapper around ComparisonImpl<float, F>.
template <ComparisonFn<float> F>
inline void Comparison(const ComparisonParams& op_params,
                       const RuntimeShape& input1_shape,
                       const float* input1_data,
                       const RuntimeShape& input2_shape,
                       const float* input2_data,
                       const RuntimeShape& output_shape, bool* output_data) {
  ComparisonImpl<float, F>(op_params, input1_shape, input1_data, input2_shape,
                           input2_data, output_shape, output_data);
}
// Element-wise comparison of two quantized tensors (no broadcasting).
// Each input is shifted by its zero point (input*_offset), scaled up by
// 2^left_shift, then requantized with its (multiplier, shift) pair so both
// operands land on a common scale; predicate F runs on the rescaled int32s.
template <typename T, ComparisonFn<int32_t> F>
inline void ComparisonWithScaling(
    const ComparisonParams& op_params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, bool* output_data) {
  int left_shift = op_params.left_shift;
  int32_t input1_offset = op_params.input1_offset;
  int32_t input1_multiplier = op_params.input1_multiplier;
  int input1_shift = op_params.input1_shift;
  int32_t input2_offset = op_params.input2_offset;
  int32_t input2_multiplier = op_params.input2_multiplier;
  int input2_shift = op_params.input2_shift;

  const int64_t flatsize =
      MatchingFlatSize(input1_shape, input2_shape, output_shape);
  for (int64_t i = 0; i < flatsize; ++i) {
    // Remove each input's zero point before rescaling.
    const int32_t input1_val = input1_offset + input1_data[i];
    const int32_t input2_val = input2_offset + input2_data[i];
    // Pre-shift left to preserve precision through the fixed-point multiply.
    const int32_t shifted_input1_val = input1_val * (1 << left_shift);
    const int32_t shifted_input2_val = input2_val * (1 << left_shift);
    const int32_t scaled_input1_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input1_val, input1_multiplier, input1_shift);
    const int32_t scaled_input2_val =
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_input2_val, input2_multiplier, input2_shift);
    output_data[i] = F(scaled_input1_val, scaled_input2_val);
  }
}
// Shapes and broadcast descriptors shared by the BroadcastComparison4DSlow*
// loops below.
struct BroadcastComparison4DSlowCommon {
  const RuntimeShape output_shape;  // Output shape extended to 4D.
  NdArrayDesc<4> desc1;             // Maps 4D subscripts onto input 1.
  NdArrayDesc<4> desc2;             // Maps 4D subscripts onto input 2.
};

// Checks all ranks are <= 4 and builds the broadcast descriptors plus the
// 4D-extended output shape consumed by the broadcast comparison loops.
inline BroadcastComparison4DSlowCommon BroadcastComparison4DSlowPreprocess(
    const RuntimeShape& unextended_input1_shape,
    const RuntimeShape& unextended_input2_shape,
    const RuntimeShape& unextended_output_shape) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  return {RuntimeShape::ExtendedShape(4, unextended_output_shape), desc1,
          desc2};
}
// Comparison with numpy-style broadcasting of both inputs to the 4D output
// shape. "Slow" because each output element converts its subscript for
// each input independently.
template <typename T, ComparisonFn<T> F>
inline void BroadcastComparison4DSlowImpl(
    const ComparisonParams& op_params,
    const RuntimeShape& unextended_input1_shape, const T* input1_data,
    const RuntimeShape& unextended_input2_shape, const T* input2_data,
    const RuntimeShape& unextended_output_shape, bool* output_data) {
  const BroadcastComparison4DSlowCommon dims =
      BroadcastComparison4DSlowPreprocess(unextended_input1_shape,
                                          unextended_input2_shape,
                                          unextended_output_shape);
  for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
    for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
      for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
        for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
          output_data[Offset(dims.output_shape, b, y, x, c)] =
              F(input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)],
                input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)]);
        }
      }
    }
  }
}
// Float entry point: thin wrapper around BroadcastComparison4DSlowImpl.
template <ComparisonFn<float> F>
inline void BroadcastComparison4DSlow(const ComparisonParams& op_params,
                                      const RuntimeShape& input1_shape,
                                      const float* input1_data,
                                      const RuntimeShape& input2_shape,
                                      const float* input2_data,
                                      const RuntimeShape& output_shape,
                                      bool* output_data) {
  BroadcastComparison4DSlowImpl<float, F>(op_params, input1_shape, input1_data,
                                          input2_shape, input2_data,
                                          output_shape, output_data);
}
// Broadcasting comparison of two quantized tensors. Combines the broadcast
// subscript mapping of BroadcastComparison4DSlowImpl with the common-scale
// rescaling of ComparisonWithScaling: offset, left-shift, requantize each
// operand, then evaluate F on the rescaled int32 values.
template <typename T, ComparisonFn<int32_t> F>
inline void BroadcastComparison4DSlowWithScaling(
    const ComparisonParams& op_params,
    const RuntimeShape& unextended_input1_shape, const T* input1_data,
    const RuntimeShape& unextended_input2_shape, const T* input2_data,
    const RuntimeShape& unextended_output_shape, bool* output_data) {
  const BroadcastComparison4DSlowCommon dims =
      BroadcastComparison4DSlowPreprocess(unextended_input1_shape,
                                          unextended_input2_shape,
                                          unextended_output_shape);

  int left_shift = op_params.left_shift;
  int32_t input1_offset = op_params.input1_offset;
  int32_t input1_multiplier = op_params.input1_multiplier;
  int input1_shift = op_params.input1_shift;
  int32_t input2_offset = op_params.input2_offset;
  int32_t input2_multiplier = op_params.input2_multiplier;
  int input2_shift = op_params.input2_shift;

  for (int b = 0; b < dims.output_shape.Dims(0); ++b) {
    for (int y = 0; y < dims.output_shape.Dims(1); ++y) {
      for (int x = 0; x < dims.output_shape.Dims(2); ++x) {
        for (int c = 0; c < dims.output_shape.Dims(3); ++c) {
          // Remove each input's zero point before rescaling.
          const int32_t input1_val =
              input1_offset +
              input1_data[SubscriptToIndex(dims.desc1, b, y, x, c)];
          const int32_t input2_val =
              input2_offset +
              input2_data[SubscriptToIndex(dims.desc2, b, y, x, c)];
          // Pre-shift left to preserve precision through the fixed-point
          // multiply.
          const int32_t shifted_input1_val = input1_val * (1 << left_shift);
          const int32_t shifted_input2_val = input2_val * (1 << left_shift);
          const int32_t scaled_input1_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input1_val, input1_multiplier, input1_shift);
          const int32_t scaled_input2_val =
              MultiplyByQuantizedMultiplierSmallerThanOneExp(
                  shifted_input2_val, input2_multiplier, input2_shift);
          output_data[Offset(dims.output_shape, b, y, x, c)] =
              F(scaled_input1_val, scaled_input2_val);
        }
      }
    }
  }
}
// For each comparison `name` (Equal, NotEqual, Greater, GreaterEqual, Less,
// LessEqual) this macro generates six entry points:
//   name                      - float inputs, matching shapes
//   name##NoScaling           - any T, matching shapes, no requantization
//   name##WithScaling         - quantized T, matching shapes, rescaled
//   Broadcast4DSlow##name##NoScaling   - broadcasting, no requantization
//   Broadcast4DSlow##name              - broadcasting, float
//   Broadcast4DSlow##name##WithScaling - broadcasting, quantized rescaled
#define TFLITE_COMPARISON_OP(name)                                            \
  inline void name(const ComparisonParams& op_params,                         \
                   const RuntimeShape& input1_shape, const float* input1_data, \
                   const RuntimeShape& input2_shape, const float* input2_data, \
                   const RuntimeShape& output_shape, bool* output_data) {      \
    Comparison<name##Fn>(op_params, input1_shape, input1_data, input2_shape,   \
                         input2_data, output_shape, output_data);              \
  }                                                                            \
  template <typename T>                                                        \
  inline void name##NoScaling(                                                 \
      const ComparisonParams& op_params, const RuntimeShape& input1_shape,     \
      const T* input1_data, const RuntimeShape& input2_shape,                  \
      const T* input2_data, const RuntimeShape& output_shape,                  \
      bool* output_data) {                                                     \
    ComparisonImpl<T, name##Fn>(op_params, input1_shape, input1_data,          \
                                input2_shape, input2_data, output_shape,       \
                                output_data);                                  \
  }                                                                            \
  template <typename T>                                                        \
  inline void name##WithScaling(                                               \
      const ComparisonParams& op_params, const RuntimeShape& input1_shape,     \
      const T* input1_data, const RuntimeShape& input2_shape,                  \
      const T* input2_data, const RuntimeShape& output_shape,                  \
      bool* output_data) {                                                     \
    ComparisonWithScaling<T, name##Fn>(op_params, input1_shape, input1_data,   \
                                       input2_shape, input2_data,              \
                                       output_shape, output_data);             \
  }                                                                            \
  template <typename T>                                                        \
  inline void Broadcast4DSlow##name##NoScaling(                                \
      const ComparisonParams& op_params, const RuntimeShape& input1_shape,     \
      const T* input1_data, const RuntimeShape& input2_shape,                  \
      const T* input2_data, const RuntimeShape& output_shape,                  \
      bool* output_data) {                                                     \
    BroadcastComparison4DSlowImpl<T, name##Fn>(                                \
        op_params, input1_shape, input1_data, input2_shape, input2_data,       \
        output_shape, output_data);                                            \
  }                                                                            \
  inline void Broadcast4DSlow##name(                                           \
      const ComparisonParams& op_params, const RuntimeShape& input1_shape,     \
      const float* input1_data, const RuntimeShape& input2_shape,              \
      const float* input2_data, const RuntimeShape& output_shape,              \
      bool* output_data) {                                                     \
    BroadcastComparison4DSlow<name##Fn>(op_params, input1_shape, input1_data,  \
                                        input2_shape, input2_data,             \
                                        output_shape, output_data);            \
  }                                                                            \
  template <typename T>                                                        \
  inline void Broadcast4DSlow##name##WithScaling(                              \
      const ComparisonParams& op_params, const RuntimeShape& input1_shape,     \
      const T* input1_data, const RuntimeShape& input2_shape,                  \
      const T* input2_data, const RuntimeShape& output_shape,                  \
      bool* output_data) {                                                     \
    BroadcastComparison4DSlowWithScaling<T, name##Fn>(                         \
        op_params, input1_shape, input1_data, input2_shape, input2_data,       \
        output_shape, output_data);                                            \
  }

TFLITE_COMPARISON_OP(Equal);
TFLITE_COMPARISON_OP(NotEqual);
TFLITE_COMPARISON_OP(Greater);
TFLITE_COMPARISON_OP(GreaterEqual);
TFLITE_COMPARISON_OP(Less);
TFLITE_COMPARISON_OP(LessEqual);
#undef TFLITE_COMPARISON_OP
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_COMPARISONS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/comparisons.h | C++ | apache-2.0 | 13,633 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
template <typename Scalar>
inline void Concatenation(const ConcatenationParams& params,
const RuntimeShape* const* input_shapes,
const Scalar* const* input_data,
const RuntimeShape& output_shape,
Scalar* output_data) {
int axis = params.axis;
int inputs_count = params.inputs_count;
const int concat_dimensions = output_shape.DimensionsCount();
TFLITE_DCHECK_LT(axis, concat_dimensions);
int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++) {
TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions);
for (int j = 0; j < concat_dimensions; j++) {
if (j != axis) {
MatchingDim(*input_shapes[i], j, output_shape, j);
}
}
concat_size += input_shapes[i]->Dims(axis);
}
TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis));
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i) {
outer_size *= output_shape.Dims(i);
}
// For all input arrays,
// FlatSize() = outer_size * Dims(axis) * base_inner_size;
int64_t base_inner_size = 1;
for (int i = axis + 1; i < concat_dimensions; ++i) {
base_inner_size *= output_shape.Dims(i);
}
Scalar* output_ptr = output_data;
for (int k = 0; k < outer_size; k++) {
for (int i = 0; i < inputs_count; ++i) {
const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
const Scalar* input_ptr = input_data[i] + k * copy_size;
memcpy(output_ptr, input_ptr, copy_size * sizeof(Scalar));
output_ptr += copy_size;
}
}
}
// TODO(b/174275780): The quantized implementation of concatentation isn't fully
// quantized as it takes scale as a floating point value. This should be fixed
// when optimizng this routine further.
inline void ConcatenationWithScaling(const ConcatenationParams& params,
const RuntimeShape* const* input_shapes,
const uint8_t* const* input_data,
const RuntimeShape& output_shape,
uint8_t* output_data) {
int axis = params.axis;
const int32_t* input_zeropoint = params.input_zeropoint;
const float* input_scale = params.input_scale;
int inputs_count = params.inputs_count;
const int32_t output_zeropoint = params.output_zeropoint;
const float output_scale = params.output_scale;
const int concat_dimensions = output_shape.DimensionsCount();
TFLITE_DCHECK_LT(axis, concat_dimensions);
int64_t concat_size = 0;
for (int i = 0; i < inputs_count; i++) {
TFLITE_DCHECK_EQ(input_shapes[i]->DimensionsCount(), concat_dimensions);
for (int j = 0; j < concat_dimensions; j++) {
if (j != axis) {
MatchingDim(*input_shapes[i], j, output_shape, j);
}
}
concat_size += input_shapes[i]->Dims(axis);
}
TFLITE_DCHECK_EQ(concat_size, output_shape.Dims(axis));
int64_t outer_size = 1;
for (int i = 0; i < axis; ++i) {
outer_size *= output_shape.Dims(i);
}
// For all input arrays,
// FlatSize() = outer_size * Dims(axis) * base_inner_size;
int64_t base_inner_size = 1;
for (int i = axis + 1; i < concat_dimensions; ++i) {
base_inner_size *= output_shape.Dims(i);
}
const float inverse_output_scale = 1.f / output_scale;
uint8_t* output_ptr = output_data;
for (int k = 0; k < outer_size; k++) {
for (int i = 0; i < inputs_count; ++i) {
const int copy_size = input_shapes[i]->Dims(axis) * base_inner_size;
const uint8_t* input_ptr = input_data[i] + k * copy_size;
if (input_zeropoint[i] == output_zeropoint &&
input_scale[i] == output_scale) {
memcpy(output_ptr, input_ptr, copy_size);
} else {
const float scale = input_scale[i] * inverse_output_scale;
const float bias = -input_zeropoint[i] * scale;
for (int j = 0; j < copy_size; ++j) {
const int32_t value = static_cast<int32_t>(tflite::TfLiteRound(
input_ptr[j] * scale + bias)) +
output_zeropoint;
output_ptr[j] = static_cast<uint8_t>(
std::max<int32_t>(std::min<int32_t>(255, value), 0));
}
}
output_ptr += copy_size;
}
}
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONCATENATION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/concatenation.h | C++ | apache-2.0 | 5,501 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Reference (unoptimized) float 2-D convolution.
//
// Layouts: input and output are NHWC ([batch, height, width, channels]);
// the filter is indexed as [out_channel, filter_y, filter_x, in_channel].
// `bias_data` may be null; when present it holds one value per output
// channel. Padding is realized by skipping taps outside the image (i.e.
// zero padding). The im2col arguments exist only to match the optimized
// kernel's signature and are ignored here.
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const float* input_data, const RuntimeShape& filter_shape,
                 const float* filter_data, const RuntimeShape& bias_shape,
                 const float* bias_data, const RuntimeShape& output_shape,
                 float* output_data, const RuntimeShape& im2col_shape,
                 float* im2col_data) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);

  (void)im2col_data;   // only used in optimized code.
  (void)im2col_shape;  // only used in optimized code.
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      // Top-left corner of the receptive field in input coordinates
      // (may be negative in the padded border).
      const int in_y_origin = (out_y * stride_height) - pad_height;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          // Accumulate over the filter window and all input channels.
          float total = 0.f;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            const int in_y = in_y_origin + dilation_height_factor * filter_y;
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              const int in_x = in_x_origin + dilation_width_factor * filter_x;

              // Zero padding by omitting the areas outside the image.
              const bool is_point_inside_image =
                  (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                  (in_y < input_height);

              if (!is_point_inside_image) {
                continue;
              }

              for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
                float input_value = input_data[Offset(input_shape, batch, in_y,
                                                      in_x, in_channel)];
                float filter_value = filter_data[Offset(
                    filter_shape, out_channel, filter_y, filter_x, in_channel)];
                total += (input_value * filter_value);
              }
            }
          }
          float bias_value = 0.0f;
          if (bias_data) {
            bias_value = bias_data[out_channel];
          }
          // Add bias and clamp to the fused activation range.
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              ActivationFunctionWithMinMax(total + bias_value,
                                           output_activation_min,
                                           output_activation_max);
        }
      }
    }
  }
}
// Reference (unoptimized) uint8 quantized 2-D convolution.
//
// Same layouts as the float version above (NHWC input/output; filter
// indexed as [out_channel, filter_y, filter_x, in_channel]). Quantized
// arithmetic: the int32 zero-point offsets are added to the raw uint8
// values, products are accumulated in int32, the optional int32 bias is
// added, and the accumulator is requantized with (output_multiplier,
// output_shift), offset by output_offset and clamped to the quantized
// activation range. The im2col and cpu_backend_context arguments exist
// only to match the optimized kernel's signature and are ignored here.
inline void Conv(const ConvParams& params, const RuntimeShape& input_shape,
                 const uint8_t* input_data, const RuntimeShape& filter_shape,
                 const uint8_t* filter_data, const RuntimeShape& bias_shape,
                 const int32_t* bias_data, const RuntimeShape& output_shape,
                 uint8_t* output_data, const RuntimeShape& im2col_shape,
                 uint8_t* im2col_data, void* cpu_backend_context) {
  (void)cpu_backend_context;  // only used in optimized code.
  (void)im2col_data;          // only used in optimized code.
  (void)im2col_shape;         // only used in optimized code.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int32_t input_offset = params.input_offset;
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);

  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      // Top-left corner of the receptive field in input coordinates
      // (may be negative in the padded border).
      const int in_y_origin = (out_y * stride_height) - pad_height;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          // Accumulate in int32 over the filter window and input channels.
          int32_t acc = 0;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            const int in_y = in_y_origin + dilation_height_factor * filter_y;
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              const int in_x = in_x_origin + dilation_width_factor * filter_x;

              // Zero padding by omitting the areas outside the image.
              const bool is_point_inside_image =
                  (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                  (in_y < input_height);

              if (!is_point_inside_image) {
                continue;
              }

              for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
                int32_t input_val = input_data[Offset(input_shape, batch, in_y,
                                                      in_x, in_channel)];
                int32_t filter_val = filter_data[Offset(
                    filter_shape, out_channel, filter_y, filter_x, in_channel)];
                // Offsets restore the real (affine-dequantized) values up to
                // scale before multiplying.
                acc +=
                    (filter_val + filter_offset) * (input_val + input_offset);
              }
            }
          }
          if (bias_data) {
            acc += bias_data[out_channel];
          }
          // Requantize to the output scale, re-apply the output zero point,
          // and clamp to the fused activation range.
          acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
                                              output_shift);
          acc += output_offset;
          acc = std::max(acc, output_activation_min);
          acc = std::min(acc, output_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              static_cast<uint8_t>(acc);
        }
      }
    }
  }
}
// Reference "hybrid" convolution: int8 quantized inputs and filters producing
// a float output. The int32 accumulator sums
// filter_val * (input_val - input_offset[batch]) over the filter window, and
// is converted to float with per_channel_scale[out_channel] *
// scaling_factors_ptr[batch] before the float bias and activation clamp.
// scaling_factors_ptr and input_offset are indexed per batch;
// per_channel_scale is indexed per output channel. The im2col arguments are
// unused in this reference path.
inline void HybridConvPerChannel(
    const ConvParams& params, float* scaling_factors_ptr,
    const RuntimeShape& input_shape, const int8_t* input_data,
    const RuntimeShape& filter_shape, const int8_t* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const RuntimeShape& im2col_shape, int8_t* im2col_data,
    const float* per_channel_scale, int32_t* input_offset) {
  (void)im2col_data;   // only used in optimized code.
  (void)im2col_shape;  // only used in optimized code.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    // Bias is float here (output domain), one entry per output channel.
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          const int in_x_origin = (out_x * stride_width) - pad_width;
          const int in_y_origin = (out_y * stride_height) - pad_height;
          int32_t acc = 0;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
                const int in_x = in_x_origin + dilation_width_factor * filter_x;
                const int in_y =
                    in_y_origin + dilation_height_factor * filter_y;
                // If the location is outside the bounds of the input image,
                // use zero as a default value.
                if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                    (in_y < input_height)) {
                  int32_t input_val = input_data[Offset(
                      input_shape, batch, in_y, in_x, in_channel)];
                  int32_t filter_val =
                      filter_data[Offset(filter_shape, out_channel, filter_y,
                                         filter_x, in_channel)];
                  // Only the input is re-centered; the filter's scale is
                  // applied per channel after accumulation.
                  acc += filter_val * (input_val - input_offset[batch]);
                }
              }
            }
          }
          // Dequantize: per-output-channel filter scale times the per-batch
          // input scaling factor.
          float acc_float =
              acc * per_channel_scale[out_channel] * scaling_factors_ptr[batch];
          if (bias_data) {
            acc_float += bias_data[out_channel];
          }
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              ActivationFunctionWithMinMax(acc_float, output_activation_min,
                                           output_activation_max);
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/conv.h | C++ | apache-2.0 | 12,770 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Naive reference implementation of 3-D convolution (float).
//
// Layouts: input/output are NDHWC; the filter is
// [depth, height, width, in_channels, out_channels]. For every output element
// the filter window is walked over the (dilated, strided, padded) input
// volume; taps that fall outside the input are skipped, which implements zero
// padding. The optional bias (one float per output channel) is added before
// the result is clamped to the fused activation range in `params`.
inline void Conv3D(const Conv3DParams& params, const RuntimeShape& input_shape,
                   const float* input_data, const RuntimeShape& filter_shape,
                   const float* filter_data, const RuntimeShape& bias_shape,
                   const float* bias_data, const RuntimeShape& output_shape,
                   float* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 5);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 5);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int in_channels = MatchingDim(input_shape, 4, filter_shape, 3);
  const int out_channels = MatchingDim(filter_shape, 4, output_shape, 4);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), out_channels);
  }
  // Only NDHWC format is currently supported, so extents come from fixed
  // dimension positions.
  const int in_depth = input_shape.Dims(1);
  const int in_height = input_shape.Dims(2);
  const int in_width = input_shape.Dims(3);
  const int filter_d = filter_shape.Dims(0);
  const int filter_h = filter_shape.Dims(1);
  const int filter_w = filter_shape.Dims(2);
  const int out_depth = output_shape.Dims(1);
  const int out_height = output_shape.Dims(2);
  const int out_width = output_shape.Dims(3);
  const int pad_d = params.padding_values.depth;
  const int pad_h = params.padding_values.height;
  const int pad_w = params.padding_values.width;
  for (int b = 0; b < batches; ++b) {
    for (int od = 0; od < out_depth; ++od) {
      const int base_d = od * params.stride_depth - pad_d;
      for (int oh = 0; oh < out_height; ++oh) {
        const int base_h = oh * params.stride_height - pad_h;
        for (int ow = 0; ow < out_width; ++ow) {
          const int base_w = ow * params.stride_width - pad_w;
          for (int oc = 0; oc < out_channels; ++oc) {
            float acc = 0.f;
            for (int fd = 0; fd < filter_d; ++fd) {
              const int id = base_d + params.dilation_depth * fd;
              // Zero padding: taps outside the input volume contribute
              // nothing, so skip as early as possible per axis.
              if (id < 0 || id >= in_depth) continue;
              for (int fh = 0; fh < filter_h; ++fh) {
                const int ih = base_h + params.dilation_height * fh;
                if (ih < 0 || ih >= in_height) continue;
                for (int fw = 0; fw < filter_w; ++fw) {
                  const int iw = base_w + params.dilation_width * fw;
                  if (iw < 0 || iw >= in_width) continue;
                  for (int ic = 0; ic < in_channels; ++ic) {
                    const float in_val =
                        input_data[Offset(input_shape, b, id, ih, iw, ic)];
                    const float filter_val =
                        filter_data[Offset(filter_shape, fd, fh, fw, ic, oc)];
                    acc += in_val * filter_val;
                  }
                }
              }
            }
            const float bias = bias_data ? bias_data[oc] : 0.0f;
            output_data[Offset(output_shape, b, od, oh, ow, oc)] =
                ActivationFunctionWithMinMax(acc + bias,
                                             params.float_activation_min,
                                             params.float_activation_max);
          }
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CONV3D_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/conv3d.h | C++ | apache-2.0 | 5,143 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
#include <algorithm>
#include <cstdint>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
namespace tflite {
namespace reference_ops {
template <typename T>
inline void CumSum(const T* input_data, const RuntimeShape& shape, int32_t axis,
bool exclusive, bool reverse, T* output_data) {
const int32_t rank = shape.DimensionsCount();
TFLITE_DCHECK_GE(rank, 1);
TFLITE_DCHECK_GE(axis, 0);
TFLITE_DCHECK_LT(axis, rank);
size_t inner = 1;
size_t outer = 1;
size_t depth = 1;
for (int32_t i = 0; i < rank; i++) {
if (i < axis)
inner *= shape.Dims(i);
else if (i > axis)
outer *= shape.Dims(i);
else
depth = shape.Dims(i);
}
for (size_t outer_index = 0; outer_index < outer; outer_index++) {
size_t outer_index_adj;
if (reverse)
outer_index_adj = (outer - 1) - outer_index;
else
outer_index_adj = outer_index;
for (size_t inner_index = 0; inner_index < inner; inner_index++) {
T accumulator = 0;
size_t inner_index_adj;
if (reverse)
inner_index_adj = (inner - 1) - inner_index;
else
inner_index_adj = inner_index;
for (size_t depth_index = 0; depth_index < depth; depth_index++) {
size_t depth_index_adj;
if (reverse)
depth_index_adj = (depth - 1) - depth_index;
else
depth_index_adj = depth_index;
size_t index = outer_index_adj;
index += inner_index_adj * depth * outer;
index += depth_index_adj * outer;
if (exclusive) {
output_data[index] = accumulator;
accumulator += input_data[index];
} else {
accumulator += input_data[index];
output_data[index] = accumulator;
}
}
}
}
}
//
// Quantized INT8 CUMSUM
//
// Mirrors the generic CumSum template above, but each element is first
// re-centered on the zero point and rescaled into a left-shifted,
// higher-precision accumulation domain; the running total is then scaled back
// through the output multiplier/shift and re-centered on output_offset for
// every element written, with clamping to the quantized activation range.
inline void CumSum(const ArithmeticParams& params, const int8_t* input_data,
                   const RuntimeShape& shape, int32_t axis, bool exclusive,
                   bool reverse, int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // Input offset is negative input zero point. Activation tensors are
  // asymmetric quantized so they span the full int8 range.
  // All inputs should have same zero-point and scale, this is checked during
  // Prepare stage.
  TFLITE_DCHECK_GE(-params.input1_offset, std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(-params.input1_offset, std::numeric_limits<int8_t>::max());
  const int32_t rank = shape.DimensionsCount();
  TFLITE_DCHECK_GE(rank, 1);
  TFLITE_DCHECK_GE(axis, 0);
  TFLITE_DCHECK_LT(axis, rank);
  // Collapse the shape to [inner, depth, outer] around the scan axis, exactly
  // as in the generic template above.
  size_t inner = 1;
  size_t outer = 1;
  size_t depth = 1;
  for (int32_t i = 0; i < rank; i++) {
    if (i < axis)
      inner *= shape.Dims(i);
    else if (i > axis)
      outer *= shape.Dims(i);
    else
      depth = shape.Dims(i);
  }
  for (size_t outer_index = 0; outer_index < outer; outer_index++) {
    size_t outer_index_adj;
    if (reverse)
      outer_index_adj = (outer - 1) - outer_index;
    else
      outer_index_adj = outer_index;
    for (size_t inner_index = 0; inner_index < inner; inner_index++) {
      // Seed the accumulator with the rescaled form of raw quantized value 0,
      // i.e. the same shift/multiplier pipeline each element goes through,
      // applied to (0 + input1_offset). This plays the role of
      // `accumulator = 0` in the generic template.
      int32_t accumulator = params.input1_offset;  // accumulator = 0
      accumulator *= (1 << params.left_shift);
      accumulator = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          accumulator, params.input1_multiplier, params.input1_shift);
      size_t inner_index_adj;
      if (reverse)
        inner_index_adj = (inner - 1) - inner_index;
      else
        inner_index_adj = inner_index;
      for (size_t depth_index = 0; depth_index < depth; depth_index++) {
        size_t depth_index_adj;
        if (reverse)
          depth_index_adj = (depth - 1) - depth_index;
        else
          depth_index_adj = depth_index;
        size_t index = outer_index_adj;
        index += inner_index_adj * depth * outer;
        index += depth_index_adj * outer;
        // Re-center on the zero point, then rescale into the shifted
        // accumulation domain.
        const int32_t y = params.input1_offset + input_data[index];
        const int32_t shifted_y = y * (1 << params.left_shift);
        const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
            shifted_y, params.input1_multiplier, params.input1_shift);
        int32_t scaled_output;
        if (exclusive) {
          // Exclusive scan emits the pre-update total.
          scaled_output = accumulator;
          accumulator += scaled_y;
        } else {
          accumulator += scaled_y;
          scaled_output = accumulator;
        }
        // Scale back into the int8 output domain, add the output zero point,
        // and clamp to the activation bounds before narrowing.
        const int32_t raw_output =
            MultiplyByQuantizedMultiplierSmallerThanOneExp(
                scaled_output, params.output_multiplier, params.output_shift) +
            params.output_offset;
        const int32_t clamped_output =
            std::min(params.quantized_activation_max,
                     std::max(params.quantized_activation_min, raw_output));
        output_data[index] = static_cast<int8_t>(clamped_output);
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_CUMSUM_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/cumsum.h | C++ | apache-2.0 | 5,917 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DENSIFY_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DENSIFY_H_
#include <vector>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/tools/optimize/sparsity/format_converter.h"
namespace tflite {
namespace reference_ops {
// Expands a sparse tensor (described by `sparsity`) into its dense
// representation in `output_data`. The heavy lifting is delegated to
// FormatConverter::SparseToDense; this wrapper only adapts the RuntimeShape
// into the std::vector<int> dense-shape form the converter expects.
template <typename T>
inline void Densify(const TfLiteSparsity* sparsity,
                    const RuntimeShape& input_shape, const T* input_data,
                    const RuntimeShape& output_shape, T* output_data,
                    TfLiteContext* context) {
  const int num_dims = output_shape.DimensionsCount();
  std::vector<int> dense_shape;
  dense_shape.reserve(num_dims);
  for (int d = 0; d < num_dims; ++d) {
    dense_shape.push_back(output_shape.Dims(d));
  }
  tflite::optimize::sparsity::FormatConverter<T> converter(dense_shape,
                                                           *sparsity);
  converter.SparseToDense(input_data, output_shape.FlatSize(), output_data,
                          context);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DENSIFY_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/densify.h | C++ | apache-2.0 | 1,906 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// DEPTH_TO_SPACE: rearranges data from the depth dimension into spatial
// blocks. After extending both shapes to rank 4 (NHWC), each output pixel
// (h, w) copies from input pixel (h / block_size, w / block_size) at a depth
// slot chosen by the pixel's position inside its block.
template <typename T>
inline void DepthToSpace(const tflite::DepthToSpaceParams& op_params,
                         const RuntimeShape& unextended_input_shape,
                         const T* input_data,
                         const RuntimeShape& unextended_output_shape,
                         T* output_data) {
  TFLITE_DCHECK_LE(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int32_t block_size = op_params.block_size;
  const int in_depth = input_shape.Dims(3);
  const int in_width = input_shape.Dims(2);
  const int in_height = input_shape.Dims(1);
  const int in_batch = input_shape.Dims(0);
  const int out_depth = output_shape.Dims(3);
  const int out_width = output_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  const int out_batch = output_shape.Dims(0);
  // Spatial dims grow by block_size while depth shrinks by block_size^2.
  TFLITE_DCHECK_EQ(in_width * block_size, out_width);
  TFLITE_DCHECK_EQ(in_height * block_size, out_height);
  TFLITE_DCHECK_EQ(in_depth, out_depth * block_size * block_size);
  TFLITE_DCHECK_EQ(in_batch, out_batch);
  for (int b = 0; b < out_batch; ++b) {
    for (int h = 0; h < out_height; ++h) {
      const int in_h = h / block_size;
      const int offset_h = h % block_size;
      for (int w = 0; w < out_width; ++w) {
        const int in_w = w / block_size;
        const int offset_w = w % block_size;
        for (int d = 0; d < out_depth; ++d) {
          // Position inside the block selects which depth slab to read from.
          const int in_d = d + (offset_h * block_size + offset_w) * out_depth;
          output_data[Offset(output_shape, b, h, w, d)] =
              input_data[Offset(input_shape, b, in_h, in_w, in_d)];
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTH_TO_SPACE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/depth_to_space.h | C++ | apache-2.0 | 3,148 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Reference float depthwise convolution (NHWC).
// Output channel oc = ic * depth_multiplier + m is produced solely from input
// channel ic, using filter slice [0, filter_y, filter_x, oc]. Taps outside
// the input image are skipped (zero padding); the optional per-channel bias
// is added before the fused activation clamp.
inline void DepthwiseConv(
    const DepthwiseParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data) {
  const int stride_w = params.stride_width;
  const int stride_h = params.stride_height;
  const int dilation_w = params.dilation_width_factor;
  const int dilation_h = params.dilation_height_factor;
  const int pad_w = params.padding_values.width;
  const int pad_h = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float act_min = params.float_activation_min;
  const float act_max = params.float_activation_max;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  for (int b = 0; b < batches; ++b) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      const int in_y_origin = out_y * stride_h - pad_h;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = out_x * stride_w - pad_w;
        for (int ic = 0; ic < input_depth; ++ic) {
          for (int m = 0; m < depth_multiplier; ++m) {
            const int oc = ic * depth_multiplier + m;
            float acc = 0.f;
            for (int fy = 0; fy < filter_height; ++fy) {
              const int in_y = in_y_origin + dilation_h * fy;
              // Zero padding: out-of-bounds taps contribute nothing.
              if (in_y < 0 || in_y >= input_height) continue;
              for (int fx = 0; fx < filter_width; ++fx) {
                const int in_x = in_x_origin + dilation_w * fx;
                if (in_x < 0 || in_x >= input_width) continue;
                acc += input_data[Offset(input_shape, b, in_y, in_x, ic)] *
                       filter_data[Offset(filter_shape, 0, fy, fx, oc)];
              }
            }
            const float bias = bias_data ? bias_data[oc] : 0.0f;
            output_data[Offset(output_shape, b, out_y, out_x, oc)] =
                ActivationFunctionWithMinMax(acc + bias, act_min, act_max);
          }
        }
      }
    }
  }
}
} // end namespace reference_ops
} // end namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_FLOAT_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h | C++ | apache-2.0 | 4,625 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_
#include <algorithm>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
// Used in tests and template parameters to control which version of depthwise
// convolution is called. Primarily for reference code, and specializations
// forced in tests.
// Selects which depthwise-convolution kernel variant is exercised; primarily
// used by tests to force a particular specialization.
enum class DepthwiseConvImplementation {
  // Run all tests against kUseStandardEntry even if also testing another
  // kernel, since we need to be sure that the main DepthwiseConv() function in
  // optimized_ops.h dispatches to a correctly-executing kernel.
  kNone = 0,                 // The "default" option: use the normal
                             // DepthwiseConv kernel (entry) function.
  kUseGenericKernel,         // Forced use of generic kernel.
  kUseNeon3x3,               // 3x3 kernel that uses NEON when available.
  kUseNeon3x3DotProduct,     // 3x3 kernel that uses dot-product enabled NEON
                             // when available.
  kUseCModel3x3DotProduct,   // 3x3 kernel, reference C model that is intended
                             // to match overall design NEON code.
  kUseUnwound3x3DotProduct,  // 3x3 kernel, reference C model with unwound loops
                             // and some arrays.
  kUseIntrinsics3x3DotProduct,  // 3x3 kernel using NEON intrinsics.
};
// Category of depthwise convolution output rounding. Each non-kNone mode has
// a matching DepthwiseConvRound<> specialization below.
enum class DepthwiseConvOutputRounding {
  kNone = 0,      // Invalid: specific method must be specified.
  kAwayFromZero,  // Original method: exact halves rounded away from zero.
  kUpward,        // Halves towards +infinity: adds 0.5 before truncate.
  // This is where a future kNearestEven would be placed.
};
// Category of depthwise convolution depth multiplication.
enum class DepthwiseConvDepthMultiplication {
  kNoMultiplication = 0,  // Depth multiplier = 1.
  kUnitInputDepth,        // Input depth = 1, output depth = depth multiplier.
};
namespace reference_ops {
namespace depthwise_conv {
// Requantizes a depthwise-conv accumulator, dispatching on the compile-time
// rounding mode. The primary template must never be instantiated with kNone;
// the real modes are handled by the explicit specializations below.
template <DepthwiseConvOutputRounding output_rounding>
inline int32_t DepthwiseConvRound(int32_t x, int32_t quantized_multiplier,
                                  int shift) {
  TFLITE_DCHECK_NE(output_rounding, DepthwiseConvOutputRounding::kNone);
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
// kAwayFromZero (exact halves rounded away from zero — see the enum comment)
// maps directly onto MultiplyByQuantizedMultiplier.
template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kAwayFromZero>(
    int32_t x, int32_t quantized_multiplier, int shift) {
  return MultiplyByQuantizedMultiplier(x, quantized_multiplier, shift);
}
// kUpward rounding: halves are rounded towards +infinity by adding half of
// the final divisor ("0.5") before the truncating right shift.
template <>
inline int32_t DepthwiseConvRound<DepthwiseConvOutputRounding::kUpward>(
    int32_t x, int32_t quantized_multiplier, int shift) {
  using gemmlowp::SaturatingRoundingDoublingHighMul;
  // Decompose the signed shift: positive means a pre-multiply left shift,
  // negative means a post-multiply right shift.
  int pre_shift = 0;
  int post_shift = 0;
  if (shift > 0) {
    pre_shift = shift;
  } else {
    post_shift = -shift;
  }
  // Round-half-up nudge for the truncating right shift below.
  const int32_t nudge = post_shift > 0 ? (1 << (post_shift - 1)) : 0;
  const int32_t high_mul = SaturatingRoundingDoublingHighMul(
      x * (1 << pre_shift), quantized_multiplier);
  return (high_mul + nudge) >> post_shift;
}
template <DepthwiseConvOutputRounding output_rounding>
struct DepthwiseConvBasicKernel {
  // Generic reference uint8 depthwise convolution (NHWC), templated on the
  // rounding mode used when scaling the accumulator back to uint8. Output
  // channel oc = m + ic * depth_multiplier reads only input channel ic and
  // filter slice [0, filter_y, filter_x, oc]. Accumulation is int32 over
  // offset-re-centered operands; the accumulator is requantized with
  // DepthwiseConvRound<output_rounding>, re-centered on output_offset, and
  // clamped to the quantized activation range.
  static inline void Run(
      const DepthwiseParams& params, const RuntimeShape& input_shape,
      const uint8_t* input_data, const RuntimeShape& filter_shape,
      const uint8_t* filter_data, const RuntimeShape& bias_shape,
      const int32_t* bias_data, const RuntimeShape& output_shape,
      uint8_t* output_data) {
    const int stride_width = params.stride_width;
    const int stride_height = params.stride_height;
    const int dilation_width_factor = params.dilation_width_factor;
    const int dilation_height_factor = params.dilation_height_factor;
    const int pad_width = params.padding_values.width;
    const int pad_height = params.padding_values.height;
    const int depth_multiplier = params.depth_multiplier;
    const int32_t output_activation_min = params.quantized_activation_min;
    const int32_t output_activation_max = params.quantized_activation_max;
    // Offsets are added to the raw quantized values before multiplication.
    const int32_t input_offset = params.input_offset;
    const int32_t filter_offset = params.weights_offset;
    const int32_t output_offset = params.output_offset;
    const int32_t output_multiplier = params.output_multiplier;
    const int output_shift = params.output_shift;
    TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
    const int batches = MatchingDim(input_shape, 0, output_shape, 0);
    const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
    const int input_height = input_shape.Dims(1);
    const int input_width = input_shape.Dims(2);
    const int input_depth = input_shape.Dims(3);
    const int filter_height = filter_shape.Dims(1);
    const int filter_width = filter_shape.Dims(2);
    const int output_height = output_shape.Dims(1);
    const int output_width = output_shape.Dims(2);
    TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
    for (int b = 0; b < batches; ++b) {
      for (int out_y = 0; out_y < output_height; ++out_y) {
        for (int out_x = 0; out_x < output_width; ++out_x) {
          for (int ic = 0; ic < input_depth; ++ic) {
            for (int m = 0; m < depth_multiplier; m++) {
              const int oc = m + ic * depth_multiplier;
              const int in_x_origin = (out_x * stride_width) - pad_width;
              const int in_y_origin = (out_y * stride_height) - pad_height;
              int32_t acc = 0;
              for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
                for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                  const int in_x =
                      in_x_origin + dilation_width_factor * filter_x;
                  const int in_y =
                      in_y_origin + dilation_height_factor * filter_y;
                  // If the location is outside the bounds of the input image,
                  // use zero as a default value.
                  if ((in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                      (in_y < input_height)) {
                    int32_t input_val =
                        input_data[Offset(input_shape, b, in_y, in_x, ic)];
                    int32_t filter_val = filter_data[Offset(
                        filter_shape, 0, filter_y, filter_x, oc)];
                    acc += (filter_val + filter_offset) *
                           (input_val + input_offset);
                  }
                }
              }
              if (bias_data) {
                acc += bias_data[oc];
              }
              // Requantize with the mode-specific rounding, add the output
              // zero point, then clamp before narrowing to uint8.
              acc = DepthwiseConvRound<output_rounding>(acc, output_multiplier,
                                                        output_shift);
              acc += output_offset;
              acc = std::max(acc, output_activation_min);
              acc = std::min(acc, output_activation_max);
              output_data[Offset(output_shape, b, out_y, out_x, oc)] =
                  static_cast<uint8_t>(acc);
            }
          }
        }
      }
    }
  }
// TODO(b/148596273): Reconcile reference versions, perhaps with common
// MultiplyByQuantizedMultiplier or DepthwiseConvRound function.
  // Reference depthwise convolution for per-channel quantized int8 tensors.
  // Each output channel carries its own requantization multiplier and shift
  // (params.output_multiplier_per_channel / params.output_shift_per_channel).
  // Tensors are indexed as [batch, y, x, channel] via Offset(). The rounding
  // behavior of the requantization step comes from the enclosing kernel's
  // `output_rounding` template parameter through DepthwiseConvRound<>.
  static inline void RunPerChannel(
      const DepthwiseParams& params, const RuntimeShape& input_shape,
      const int8_t* input_data, const RuntimeShape& filter_shape,
      const int8_t* filter_data, const RuntimeShape& bias_shape,
      const int32_t* bias_data, const RuntimeShape& output_shape,
      int8_t* output_data) {
    // Get parameters.
    // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
    const int stride_width = params.stride_width;
    const int stride_height = params.stride_height;
    const int dilation_width_factor = params.dilation_width_factor;
    const int dilation_height_factor = params.dilation_height_factor;
    const int pad_width = params.padding_values.width;
    const int pad_height = params.padding_values.height;
    const int depth_multiplier = params.depth_multiplier;
    const int32_t input_offset = params.input_offset;
    const int32_t output_offset = params.output_offset;
    const int32_t output_activation_min = params.quantized_activation_min;
    const int32_t output_activation_max = params.quantized_activation_max;
    const int32_t* output_multiplier = params.output_multiplier_per_channel;
    const int32_t* output_shift = params.output_shift_per_channel;

    // Check dimensions of the tensors.
    TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
    TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
    const int batches = MatchingDim(input_shape, 0, output_shape, 0);
    const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
    const int input_height = input_shape.Dims(1);
    const int input_width = input_shape.Dims(2);
    const int input_depth = input_shape.Dims(3);
    const int filter_height = filter_shape.Dims(1);
    const int filter_width = filter_shape.Dims(2);
    const int output_height = output_shape.Dims(1);
    const int output_width = output_shape.Dims(2);
    // Each input channel produces depth_multiplier consecutive output
    // channels.
    TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);

    for (int batch = 0; batch < batches; ++batch) {
      for (int out_y = 0; out_y < output_height; ++out_y) {
        for (int out_x = 0; out_x < output_width; ++out_x) {
          for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
            for (int m = 0; m < depth_multiplier; ++m) {
              const int output_channel = m + in_channel * depth_multiplier;
              // Top-left corner of the receptive field in input coordinates
              // (may be negative or past the edge when padding applies).
              const int in_x_origin = (out_x * stride_width) - pad_width;
              const int in_y_origin = (out_y * stride_height) - pad_height;
              int32_t acc = 0;
              for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
                for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                  const int in_x =
                      in_x_origin + dilation_width_factor * filter_x;
                  const int in_y =
                      in_y_origin + dilation_height_factor * filter_y;
                  // Zero padding by omitting the areas outside the image.
                  const bool is_point_inside_image =
                      (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                      (in_y < input_height);
                  if (is_point_inside_image) {
                    int32_t input_val = input_data[Offset(
                        input_shape, batch, in_y, in_x, in_channel)];
                    int32_t filter_val = filter_data[Offset(
                        filter_shape, 0, filter_y, filter_x, output_channel)];
                    // Accumulate with 32 bits accumulator.
                    // In the nudging process during model quantization, we
                    // force real value of 0.0 be represented by a quantized
                    // value. This guarantees that the input_offset is a int8_t,
                    // even though it is represented using int32_t. int32_t +=
                    // int8_t
                    // * (int8_t - int8_t) so the highest value we can get from
                    // each accumulation is [-127, 127] * ([-128, 127] -
                    // [-128, 127]), which is [-32512, 32512]. log2(32512)
                    // = 14.98, which means we can accumulate at least 2^16
                    // multiplications without overflow. The accumulator is
                    // applied to a filter so the accumulation logic will hold
                    // as long as the filter size (filter_y * filter_x *
                    // in_channel) does not exceed 2^16, which is the case in
                    // all the models we have seen so far.
                    acc += filter_val * (input_val + input_offset);
                  }
                }
              }
              // Optional bias is added directly to the int32 accumulator.
              if (bias_data) {
                acc += bias_data[output_channel];
              }
              // Requantize with this channel's multiplier/shift, re-center on
              // the output zero point, and clamp to the fused activation
              // range before narrowing to int8.
              acc = DepthwiseConvRound<output_rounding>(
                  acc, output_multiplier[output_channel],
                  output_shift[output_channel]);
              acc += output_offset;
              acc = std::max(acc, output_activation_min);
              acc = std::min(acc, output_activation_max);
              output_data[Offset(output_shape, batch, out_y, out_x,
                                 output_channel)] = static_cast<int8_t>(acc);
            }
          }
        }
      }
    }
  }
};
} // namespace depthwise_conv
inline void DepthwiseConv(
const DepthwiseParams& params, const RuntimeShape& input_shape,
const uint8_t* input_data, const RuntimeShape& filter_shape,
const uint8_t* filter_data, const RuntimeShape& bias_shape,
const int32_t* bias_data, const RuntimeShape& output_shape,
uint8_t* output_data) {
return depthwise_conv::DepthwiseConvBasicKernel<
DepthwiseConvOutputRounding::kAwayFromZero>::Run(params, input_shape,
input_data, filter_shape,
filter_data, bias_shape,
bias_data, output_shape,
output_data);
}
} // namespace reference_ops
} // end namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEPTHWISECONV_UINT8_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h | C++ | apache-2.0 | 14,754 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
#include <limits.h>
#include <vector>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Dequantizes a tensor into a floating-point type without rounding:
//   output[i] = scale * (input[i] - zero_point)
template <typename InputT, typename OutputT>
inline void Dequantize(const tflite::DequantizationParams& op_params,
                       const RuntimeShape& input_shape,
                       const InputT* input_data,
                       const RuntimeShape& output_shape, OutputT* output_data) {
  const int32_t zero_point = op_params.zero_point;
  const double scale = op_params.scale;
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < num_elements; ++i) {
    const int32_t quantized_value = input_data[i];
    output_data[i] =
        static_cast<OutputT>(scale * (quantized_value - zero_point));
  }
}
// Dequantizes a per-channel quantized tensor to float:
//   output[i] = scale[c] * (input[i] - zero_point[c])
// where c is element i's coordinate along op_params.quantized_dimension.
// Iterates element-by-element with NextIndex/ReducedOutputOffset (instead of
// a flat loop) so the channel coordinate is available for every element.
template <typename T>
inline void PerChannelDequantize(
    const tflite::PerChannelDequantizationParams& op_params,
    const RuntimeShape& input_shape, const T* input_data,
    const RuntimeShape& output_shape, float* output_data) {
  // Ensure flat size is same.
  MatchingFlatSize(input_shape, output_shape);
  const int32_t* zero_point = op_params.zero_point;
  const float* scale = op_params.scale;
  const int32_t quantized_dimension = op_params.quantized_dimension;
  const int32_t num_dims = input_shape.DimensionsCount();
  const int32_t* dims_data = input_shape.DimsData();
  // current_dim holds the multi-dimensional index of the element being
  // processed; NextIndex advances it in row-major order.
  std::vector<int> current_dim(num_dims, 0);
  do {
    // NOTE(review): the reinterpret_cast treats the int32_t dims array as
    // int*; this presumes int is 32-bit on supported platforms.
    size_t offset =
        ReducedOutputOffset(num_dims, reinterpret_cast<const int*>(dims_data),
                            current_dim.data(), 0, nullptr);
    const int channel = current_dim[quantized_dimension];
    const int32_t val = input_data[offset];
    const float result =
        static_cast<float>(scale[channel] * (val - zero_point[channel]));
    output_data[offset] = result;
  } while (NextIndex(num_dims, reinterpret_cast<const int*>(dims_data),
                     current_dim.data()));
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DEQUANTIZE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/dequantize.h | C++ | apache-2.0 | 3,015 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
#include <algorithm>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Validates the quantization parameters used by the integer Div kernels.
// The offsets are negated zero points; activation tensors are asymmetric
// quantized over the full range of T, so every offset must lie within
// [-max(T), max(T)].
template <typename T>
inline void DivCheckArithmeticParams(const ArithmeticParams& params) {
  constexpr int32_t kTypeMax =
      static_cast<int32_t>(std::numeric_limits<T>::max());
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_GE(params.input1_offset, -kTypeMax);
  TFLITE_DCHECK_LE(params.input1_offset, kTypeMax);
  TFLITE_DCHECK_GE(params.input2_offset, -kTypeMax);
  TFLITE_DCHECK_LE(params.input2_offset, kTypeMax);
  TFLITE_DCHECK_GE(params.output_offset, -kTypeMax);
  TFLITE_DCHECK_LE(params.output_offset, kTypeMax);
}
// Element-wise div that can often be used for inner loop of broadcast Div as
// well as the non-broadcast Div.
//
// Each quotient is computed by multiplying the offset-corrected numerator
// with a fixed-point reciprocal of the offset-corrected denominator, then
// rescaling the product with the output multiplier/shift and re-centering on
// the output offset. Division by a (quantized) zero is a checked error.
template <typename T>
inline void DivElementwise(int size, const ArithmeticParams& params,
                           const T* input1_data, const T* input2_data,
                           T* output_data) {
  DivCheckArithmeticParams<T>(params);

  for (int i = 0; i < size; ++i) {
    const int32_t input1_val = params.input1_offset + input1_data[i];
    const int32_t input2_val = params.input2_offset + input2_data[i];
    TFLITE_DCHECK_NE(input2_val, 0);
    int recip_shift;
    // Fixed-point reciprocal of |input2_val| (31 fractional bits requested);
    // recip_shift records the scaling applied. The sign is reapplied here.
    const int32_t input2_inv =
        (input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift)
                         : -GetReciprocal(-input2_val, 31, &recip_shift);
    // Shift the numerator up by its headroom so the fixed-point multiply
    // keeps as much precision as possible without overflowing.
    const int headroom = CountLeadingSignBits(input1_val);
    const int32_t unscaled_quotient =
        MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
                                                    headroom);
    // Undo the reciprocal and headroom scalings while applying the output
    // scale in a single shift.
    const int total_shift = params.output_shift - recip_shift - headroom;
    const int32_t unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            unscaled_quotient, params.output_multiplier, total_shift);
    // Clamp to the fused activation range before narrowing to T.
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[i] = static_cast<T>(clamped_output);
  }
}
// Non-broadcast elementwise Div on uint8 tensors. All three shapes must have
// the same number of elements; the quantized arithmetic lives in
// DivElementwise.
inline void Div(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8_t* input1_data,
                const RuntimeShape& input2_shape, const uint8_t* input2_data,
                const RuntimeShape& output_shape, uint8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int num_elements =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  DivElementwise(num_elements, params, input1_data, input2_data, output_data);
}
// Non-broadcast elementwise Div on int8 tensors. All three shapes must have
// the same number of elements; the quantized arithmetic lives in
// DivElementwise.
inline void Div(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int8_t* input1_data,
                const RuntimeShape& input2_shape, const int8_t* input2_data,
                const RuntimeShape& output_shape, int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int num_elements =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  DivElementwise(num_elements, params, input1_data, input2_data, output_data);
}
// Broadcasting quantized Div for tensors of rank <= N. Input shapes are
// extended to rank N, NdArrayDesc encodes the broadcast strides, and
// NDOpsHelper visits every output index. The per-element arithmetic mirrors
// DivElementwise: fixed-point reciprocal of the denominator, rescale with the
// output multiplier/shift, re-center on the output offset, clamp.
template <typename T, int N = 5>
inline void BroadcastDivSlowQuantized(
    const ArithmeticParams& params, const RuntimeShape& unextended_input1_shape,
    const T* input1_data, const RuntimeShape& unextended_input2_shape,
    const T* input2_data, const RuntimeShape& unextended_output_shape,
    T* output_data) {
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);

  NdArrayDesc<N> desc1;
  NdArrayDesc<N> desc2;
  NdArrayDesc<N> output_desc;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);

  DivCheckArithmeticParams<T>(params);

  // Invoked once per output element with that element's N-dim index.
  auto div_func = [&](int indexes[N]) {
    const int32_t input1_val =
        params.input1_offset + input1_data[SubscriptToIndex(desc1, indexes)];
    const int32_t input2_val =
        params.input2_offset + input2_data[SubscriptToIndex(desc2, indexes)];
    TFLITE_DCHECK_NE(input2_val, 0);
    int recip_shift;
    // Fixed-point reciprocal of |input2_val|; the sign is reapplied here.
    const int32_t input2_inv =
        (input2_val > 0) ? GetReciprocal(input2_val, 31, &recip_shift)
                         : -GetReciprocal(-input2_val, 31, &recip_shift);
    const int headroom = CountLeadingSignBits(input1_val);
    const int32_t unscaled_quotient =
        MultiplyByQuantizedMultiplierGreaterThanOne(input1_val, input2_inv,
                                                    headroom);
    // Fold reciprocal scaling, headroom, and output scale into one shift.
    const int total_shift = params.output_shift - recip_shift - headroom;
    const int32_t unclamped_result =
        params.output_offset +
        MultiplyByQuantizedMultiplierSmallerThanOneExp(
            unscaled_quotient, params.output_multiplier, total_shift);
    const int32_t clamped_output =
        std::min(params.quantized_activation_max,
                 std::max(params.quantized_activation_min, unclamped_result));
    output_data[SubscriptToIndex(output_desc, indexes)] =
        static_cast<T>(clamped_output);
  };
  NDOpsHelper<N>(output_desc, div_func);
}
// Broadcasting Div entry point for uint8 tensors; forwards to the shared
// quantized implementation instantiated for uint8_t.
template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
                             const RuntimeShape& unextended_input1_shape,
                             const uint8_t* input1_data,
                             const RuntimeShape& unextended_input2_shape,
                             const uint8_t* input2_data,
                             const RuntimeShape& unextended_output_shape,
                             uint8_t* output_data) {
  BroadcastDivSlowQuantized<uint8_t, N>(params, unextended_input1_shape,
                                        input1_data, unextended_input2_shape,
                                        input2_data, unextended_output_shape,
                                        output_data);
}
// Broadcasting Div entry point for int8 tensors; forwards to the shared
// quantized implementation instantiated for int8_t.
template <int N = 5>
inline void BroadcastDivSlow(const ArithmeticParams& params,
                             const RuntimeShape& unextended_input1_shape,
                             const int8_t* input1_data,
                             const RuntimeShape& unextended_input2_shape,
                             const int8_t* input2_data,
                             const RuntimeShape& unextended_output_shape,
                             int8_t* output_data) {
  BroadcastDivSlowQuantized<int8_t, N>(params, unextended_input1_shape,
                                       input1_data, unextended_input2_shape,
                                       input2_data, unextended_output_shape,
                                       output_data);
}
// TODO(jiawen): We can implement BroadcastDiv on buffers of arbitrary
// dimensionality if the runtime code does a single loop over one dimension
// that handles broadcasting as the base case. The code generator would then
// generate max(D1, D2) nested for loops.
//
// Broadcasting element-wise division for non-quantized types (e.g. float).
// Both inputs are extended to rank N, NdArrayDesc encodes broadcast strides,
// and every output element is input1 / input2 clamped to the activation range
// taken from `params`.
template <typename T, int N = 5>
void BroadcastDivSlow(const ArithmeticParams& params,
                      const RuntimeShape& unextended_input1_shape,
                      const T* input1_data,
                      const RuntimeShape& unextended_input2_shape,
                      const T* input2_data,
                      const RuntimeShape& unextended_output_shape,
                      T* output_data) {
  T output_activation_min;
  T output_activation_max;
  GetActivationParams(params, &output_activation_min, &output_activation_max);

  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);

  NdArrayDesc<N> desc1;
  NdArrayDesc<N> desc2;
  NdArrayDesc<N> output_desc;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                 &output_desc);

  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest
  // stride, typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  auto div_func = [&](int indexes[N]) {
    output_data[SubscriptToIndex(output_desc, indexes)] =
        ActivationFunctionWithMinMax(
            input1_data[SubscriptToIndex(desc1, indexes)] /
                input2_data[SubscriptToIndex(desc2, indexes)],
            output_activation_min, output_activation_max);
  };
  NDOpsHelper<N>(output_desc, div_func);
}
// Non-broadcast elementwise division for non-quantized types (e.g. float),
// with the fused activation's [min, max] bounds applied to each quotient.
template <typename T>
inline void Div(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  T activation_min;
  T activation_max;
  GetActivationParams(params, &activation_min, &activation_max);
  const int num_elements =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < num_elements; ++i) {
    const T quotient = input1_data[i] / input2_data[i];
    output_data[i] =
        ActivationFunctionWithMinMax(quotient, activation_min, activation_max);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_DIV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/div.h | C++ | apache-2.0 | 10,662 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Exponential Linear Unit: f(x) = x for x >= 0 and exp(x) - 1 for x < 0.
inline void Elu(const RuntimeShape& input_shape, const float* input_data,
                const RuntimeShape& output_shape, float* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < num_elements; ++i) {
    const float x = input_data[i];
    if (x < 0.0f) {
      // TfLiteExpm1 evaluates exp(x) - 1 directly, avoiding the cancellation
      // of computing the two terms separately for small |x|.
      output_data[i] = TfLiteExpm1(x);
    } else {
      output_data[i] = x;
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ELU_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/elu.h | C++ | apache-2.0 | 1,412 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
#include <cmath>
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise natural exponential: output[i] = exp(input[i]) for
// num_elements contiguous values.
template <typename T>
inline void Exp(const T* input_data, const size_t num_elements,
                T* output_data) {
  ruy::profiler::ScopeLabel label("Exp");  // Profiling scope marker.
  const T* const input_end = input_data + num_elements;
  for (const T* in = input_data; in != input_end; ++in, ++output_data) {
    *output_data = std::exp(*in);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_EXP_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/exp.h | C++ | apache-2.0 | 1,345 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Fills every element of the output tensor with the scalar referenced by
// `value_data`. `value_shape` must describe a scalar (rank 0).
template <typename T>
void Fill(const RuntimeShape& value_shape, const T* value_data,
          const RuntimeShape& output_shape, T* output_data) {
  TFLITE_DCHECK_EQ(value_shape.DimensionsCount(), 0);
  const int num_elements = output_shape.FlatSize();
  for (int i = 0; i < num_elements; ++i) {
    output_data[i] = *value_data;
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FILL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/fill.h | C++ | apache-2.0 | 1,353 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
#include <cmath>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise floor: output[i] = largest integer value <= input[i].
inline void Floor(const RuntimeShape& input_shape, const float* input_data,
                  const RuntimeShape& output_shape, float* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < num_elements; ++i) {
    output_data[i] = std::floor(input_data[i]);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/floor.h | C++ | apache-2.0 | 1,370 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
#include <cmath>
#include <functional>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Division rounded toward negative infinity (Python-style //). Both operands
// are widened to double before dividing, so integer arguments are divided
// without truncation prior to the floor, then the result narrows back to T.
template <typename T>
T FloorDiv(T input1, T input2) {
  const double quotient =
      static_cast<double>(input1) / static_cast<double>(input2);
  return std::floor(quotient);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_DIV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/floor_div.h | C++ | apache-2.0 | 1,262 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
#include <cmath>
#include <functional>
namespace tflite {
namespace reference_ops {
// Modulo whose result has the sign of the divisor (Python-style %), built
// from the C++ truncated remainder: when the truncated remainder is nonzero
// and its sign differs from the divisor's, one divisor is added back.
// Integral T uses std::modulus; floating T goes through std::fmod at float
// precision (the functor takes float, matching the original behavior).
template <typename T>
T FloorMod(T input1, T input2) {
  struct FloatModulo {
    float operator()(const float dividend, const float divisor) const {
      return std::fmod(dividend, divisor);
    }
  };
  using ModuloOp = typename std::conditional<std::is_integral<T>::value,
                                             std::modulus<T>, FloatModulo>::type;
  const T remainder = ModuloOp()(input1, input2);
  const bool needs_adjustment =
      (remainder != 0) && ((input2 < 0) != (remainder < 0));
  return needs_adjustment ? remainder + input2 : remainder;
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FLOOR_MOD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/floor_mod.h | C++ | apache-2.0 | 1,529 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Float fully-connected (dense) layer. For each batch row b and output
// channel out_c:
//   output[b, out_c] = clamp(dot(input[b, :], weights[out_c, :]) + bias)
// where clamp applies params.float_activation_min/max and bias is optional.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& weights_shape,
    const float* weights_data, const RuntimeShape& bias_shape,
    const float* bias_data, const RuntimeShape& output_shape,
    float* output_data) {
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dims_count = output_shape.DimensionsCount();
  const int weights_dims_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dims_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dims_count - 2,
                                       output_shape, output_dims_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dims_count - 1);
  for (int b = 0; b < batches; ++b) {
    const float* input_row = input_data + b * accum_depth;
    float* output_row = output_data + b * output_depth;
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      const float* weights_row = weights_data + out_c * accum_depth;
      // Accumulate the dot product first; the bias is added afterwards so the
      // floating-point summation order matches the canonical reference.
      float total = 0.f;
      for (int d = 0; d < accum_depth; ++d) {
        total += input_row[d] * weights_row[d];
      }
      const float bias_value = bias_data ? bias_data[out_c] : 0.0f;
      output_row[out_c] = ActivationFunctionWithMinMax(
          total + bias_value, output_activation_min, output_activation_max);
    }
  }
}
// Quantized (uint8, per-tensor) fully-connected layer. Inputs and weights are
// asymmetric quantized, so each operand is offset-corrected before the
// multiply; the int32 accumulator is then requantized with the per-tensor
// output multiplier/shift, re-centered on output_offset, and clamped to the
// fused activation range before narrowing to uint8.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8_t* input_data, const RuntimeShape& filter_shape,
    const uint8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    uint8_t* output_data) {
  const int32_t input_offset = params.input_offset;
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int b = 0; b < batches; ++b) {
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      int32_t acc = 0;
      for (int d = 0; d < accum_depth; ++d) {
        int32_t input_val = input_data[b * accum_depth + d];
        int32_t filter_val = filter_data[out_c * accum_depth + d];
        // Offset-correct both operands before multiplying (asymmetric
        // quantization).
        acc += (filter_val + filter_offset) * (input_val + input_offset);
      }
      // Optional bias is added directly to the int32 accumulator.
      if (bias_data) {
        acc += bias_data[out_c];
      }
      // Requantize to the output scale, re-center, and clamp.
      acc = MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
      acc += output_offset;
      acc = std::max(acc, output_activation_min);
      acc = std::min(acc, output_activation_max);
      output_data[out_c + output_depth * b] = static_cast<uint8_t>(acc);
    }
  }
}
// Quantized fully-connected layer producing int16 outputs from uint8 inputs
// and weights. Requires params.output_offset == 0 (DCHECK'd below). Note that
// unlike the uint8-output overload above, bias_data is dereferenced without a
// null check, so a bias tensor must be provided.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8_t* input_data, const RuntimeShape& filter_shape,
    const uint8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data) {
  const int32_t input_offset = params.input_offset;
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  TFLITE_DCHECK_EQ(output_offset, 0);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(filter_shape, filter_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int b = 0; b < batches; ++b) {
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      // Internal accumulation.
      // Initialize accumulator with the bias-value.
      int32_t accum = bias_data[out_c];
      // Accumulation loop: both operands are offset-corrected to their real
      // values' scale before the multiply.
      for (int d = 0; d < accum_depth; ++d) {
        int16_t input_val = input_data[b * accum_depth + d] + input_offset;
        int16_t filter_val =
            filter_data[out_c * accum_depth + d] + filter_offset;
        accum += filter_val * input_val;
      }
      // Down-scale the final int32_t accumulator to the scale used by our
      // (16-bit, typically 3 integer bits) fixed-point format. The quantized
      // multiplier and shift here have been pre-computed offline
      // (e.g. by toco).
      accum =
          MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift);
      // Saturate, cast to int16_t, and store to output array.
      accum = std::max(accum, output_activation_min - output_offset);
      accum = std::min(accum, output_activation_max - output_offset);
      accum += output_offset;
      output_data[out_c + output_depth * b] = accum;
    }
  }
}
// Fully-connected layer over pre-shuffled uint8 weights, producing int16
// output. Only batch sizes 1 and 4 are supported (DCHECK-fails otherwise).
//
// `shuffled_weights_data` must contain the filter matrix pre-reordered into
// 4x16 blocks with the sign bit of every byte pre-flipped (XOR 0x80), so that
// reinterpreting the bytes as int8_t subtracts the zero point 128 for free.
// `shuffled_input_workspace_data` is caller-provided scratch that receives the
// input activations transformed the same way before the main compute loop.
// Requires accum_depth % 16 == 0 and output_depth % 4 == 0.
inline void ShuffledFullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8_t* input_data, const RuntimeShape& weights_shape,
    const uint8_t* shuffled_weights_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data, uint8_t* shuffled_input_workspace_data) {
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  TFLITE_DCHECK_GE(input_shape.DimensionsCount(), 1);
  TFLITE_DCHECK_GE(weights_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_GE(output_shape.DimensionsCount(), 1);
  // TODO(b/62193649): This really should be:
  //     const int batches = ArraySize(output_dims, 1);
  // but the current --variable_batch hack consists in overwriting the 3rd
  // dimension with the runtime batch size, as we don't keep track for each
  // array of which dimension is the batch dimension in it.
  const int output_dim_count = output_shape.DimensionsCount();
  const int weights_dim_count = weights_shape.DimensionsCount();
  const int batches = FlatSizeSkipDim(output_shape, output_dim_count - 1);
  const int output_depth = MatchingDim(weights_shape, weights_dim_count - 2,
                                       output_shape, output_dim_count - 1);
  const int accum_depth = weights_shape.Dims(weights_dim_count - 1);
  TFLITE_DCHECK((accum_depth % 16) == 0);
  TFLITE_DCHECK((output_depth % 4) == 0);
  // Shuffling and xoring of input activations into the workspace buffer
  uint8_t* shuffled_input_workspace_ptr = shuffled_input_workspace_data;
  if (batches == 1) {
    // Single batch: no reordering needed, only the sign-bit flip.
    for (int i = 0; i < accum_depth; i++) {
      shuffled_input_workspace_data[i] = input_data[i] ^ 0x80;
    }
  } else if (batches == 4) {
    // Four batches: interleave the batches in depth-16 chunks so the compute
    // loop below can walk the workspace strictly linearly.
    for (int c = 0; c < accum_depth; c += 16) {
      for (int b = 0; b < 4; b++) {
        const uint8_t* src_data_ptr = input_data + b * accum_depth + c;
        for (int j = 0; j < 16; j++) {
          uint8_t src_val = *src_data_ptr++;
          // Flip the sign bit, so that the kernel will only need to
          // reinterpret these uint8_t values as int8_t, getting for free the
          // subtraction of the zero_point value 128.
          uint8_t dst_val = src_val ^ 0x80;
          *shuffled_input_workspace_ptr++ = dst_val;
        }
      }
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }
  // Actual computation
  if (batches == 1) {
    int16_t* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8_t values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8_t* shuffled_weights_ptr =
        reinterpret_cast<const int8_t*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8_t* shuffled_input_data =
        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      // Accumulators for 4 output channels at once; the bias is added after
      // the loop, so they start at zero.
      int32_t accum[4] = {0};
      // Accumulation loop.
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int j = 0; j < 16; j++) {
            int8_t input_val = shuffled_input_data[d + j];
            int8_t weights_val = *shuffled_weights_ptr++;
            accum[i] += weights_val * input_val;
          }
        }
      }
      for (int i = 0; i < 4; i++) {
        // Add bias value
        int32_t acc = accum[i] + bias_data[c + i];
        // Down-scale the final int32_t accumulator to the scale used by our
        // (16-bit, typically 3 integer bits) fixed-point format. The quantized
        // multiplier and shift here have been pre-computed offline
        // (e.g. by toco).
        acc =
            MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
        // Saturate, cast to int16_t, and store to output array.
        acc = std::max(acc, output_activation_min);
        acc = std::min(acc, output_activation_max);
        output_ptr[c + i] = acc;
      }
    }
  } else if (batches == 4) {
    int16_t* output_ptr = output_data;
    // Shuffled weights have had their sign bit (0x80) pre-flipped (xor'd)
    // so that just reinterpreting them as int8_t values is equivalent to
    // subtracting 128 from them, thus implementing for free the subtraction of
    // the zero_point value 128.
    const int8_t* shuffled_weights_ptr =
        reinterpret_cast<const int8_t*>(shuffled_weights_data);
    // Likewise, we preshuffled and pre-xored the input data above.
    const int8_t* shuffled_input_data =
        reinterpret_cast<const int8_t*>(shuffled_input_workspace_data);
    for (int c = 0; c < output_depth; c += 4) {
      const int8_t* shuffled_input_ptr = shuffled_input_data;
      // 4x4 accumulator block: accum[out_channel][batch]. The bias is added
      // after the loop, so all entries start at zero.
      int32_t accum[4][4];
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          accum[i][b] = 0;
        }
      }
      for (int d = 0; d < accum_depth; d += 16) {
        for (int i = 0; i < 4; i++) {
          for (int b = 0; b < 4; b++) {
            for (int j = 0; j < 16; j++) {
              int8_t input_val = shuffled_input_ptr[16 * b + j];
              int8_t weights_val = shuffled_weights_ptr[16 * i + j];
              accum[i][b] += weights_val * input_val;
            }
          }
        }
        shuffled_input_ptr += 64;
        shuffled_weights_ptr += 64;
      }
      for (int i = 0; i < 4; i++) {
        for (int b = 0; b < 4; b++) {
          // Add bias value
          int32_t acc = accum[i][b] + bias_data[c + i];
          // Down-scale the final int32_t accumulator to the scale used by our
          // (16-bit, typically 3 integer bits) fixed-point format. The
          // quantized multiplier and shift here have been pre-computed offline
          // (e.g. by toco).
          acc = MultiplyByQuantizedMultiplier(acc, output_multiplier,
                                              output_shift);
          // Saturate, cast to int16_t, and store to output array.
          acc = std::max(acc, output_activation_min);
          acc = std::min(acc, output_activation_max);
          output_ptr[b * output_depth + c + i] = acc;
        }
      }
    }
  } else {
    TFLITE_DCHECK(false);
    return;
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_FULLY_CONNECTED_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/fully_connected.h | C++ | apache-2.0 | 14,815 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_GATHER_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_GATHER_H_
#include <cstring>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Gathers slices of `input_data` along `axis` at the indices supplied in
// `coords_data`, supporting shared leading batch dimensions
// (op_params.batch_dims), as in tf.gather.
//
// Negative `axis` / `batch_dims` count from the end. For each batch and each
// outer position, `inner_size` contiguous elements are copied per index.
//
// Fix vs. previous version: the range DCHECKs now validate the coordinate
// that is actually read, coords_data[batch * coord_size + i]; previously only
// coords_data[i] was checked, so out-of-range indices in batches > 0 went
// undetected.
template <typename T, typename CoordsT = int32>
inline void Gather(const tflite::GatherParams& op_params,
                   const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& coords_shape, const CoordsT* coords_data,
                   const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("Gather");
  int axis = op_params.axis;
  if (axis < 0) {
    axis += input_shape.DimensionsCount();
  }
  TFLITE_DCHECK_GE(axis, 0);
  TFLITE_DCHECK_LT(axis, input_shape.DimensionsCount());
  int batch_dims = op_params.batch_dims;
  if (batch_dims < 0) {
    batch_dims += coords_shape.DimensionsCount();
  }
  TFLITE_DCHECK_GE(batch_dims, 0);
  TFLITE_DCHECK_LT(batch_dims, input_shape.DimensionsCount());
  TFLITE_DCHECK_LE(batch_dims, coords_shape.DimensionsCount());
  TFLITE_DCHECK_GE(axis, batch_dims);
  // Batch dimensions must agree between input and indices.
  for (int i = 0; i < batch_dims; ++i) {
    TFLITE_DCHECK_EQ(input_shape.Dims(i), coords_shape.Dims(i));
  }
  const int axis_size = input_shape.Dims(axis);
  // Product of the shared leading batch dimensions.
  int batch_size = 1;
  for (int i = 0; i < batch_dims; ++i) {
    batch_size *= input_shape.Dims(i);
  }
  // Product of input dims between the batch dims and the gather axis.
  int outer_size = 1;
  for (int i = batch_dims; i < axis; ++i) {
    outer_size *= input_shape.Dims(i);
  }
  // Product of input dims after the gather axis; copied contiguously.
  int inner_size = 1;
  for (int i = axis + 1; i < input_shape.DimensionsCount(); ++i) {
    inner_size *= input_shape.Dims(i);
  }
  // Number of indices per batch.
  int coord_size = 1;
  for (int i = batch_dims; i < coords_shape.DimensionsCount(); ++i) {
    coord_size *= coords_shape.Dims(i);
  }
  for (int batch = 0; batch < batch_size; ++batch) {
    for (int outer = 0; outer < outer_size; ++outer) {
      for (int i = 0; i < coord_size; ++i) {
        // Validate the coordinate that is actually dereferenced below.
        const CoordsT coord = coords_data[batch * coord_size + i];
        TFLITE_DCHECK_GE(coord, 0);
        TFLITE_DCHECK_LT(coord, axis_size);
        // TODO(rsun): replace memcpy with a for loop
        std::memcpy(
            output_data +
                (((batch * outer_size) + outer) * coord_size + i) * inner_size,
            input_data +
                (((batch * outer_size) + outer) * axis_size + coord) *
                    inner_size,
            sizeof(T) * inner_size);
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_GATHER_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/gather.h | C++ | apache-2.0 | 3,313 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Shifts `value` left by `amount` bits, clamping the result to the
// representable int16_t range instead of wrapping on overflow.
inline int16_t SaturatingLeftShift(int16_t value, int amount) {
  const int32_t shifted = static_cast<int32_t>(value) * (1 << amount);
  constexpr int32_t kMin = std::numeric_limits<int16_t>::min();
  constexpr int32_t kMax = std::numeric_limits<int16_t>::max();
  return static_cast<int16_t>(std::min(kMax, std::max(kMin, shifted)));
}
// Similar to ARM instruction SQDMULH: returns the high 16 bits of the doubled
// 32-bit product a*b, i.e. (a * b) >> 15, except that the division truncates
// toward zero rather than rounding to nearest (contrast SQRDMULH and
// gemmlowp::SaturatingRoundingDoublingHighMul). The single overflow case
// (both inputs INT16_MIN) saturates to INT16_MAX.
inline std::int16_t SaturatingDoublingHighMul(std::int16_t a, std::int16_t b) {
  constexpr std::int16_t kMin = std::numeric_limits<std::int16_t>::min();
  if (a == kMin && b == kMin) {
    // -32768 * -32768 * 2 does not fit in 32 bits after doubling: saturate.
    return std::numeric_limits<std::int16_t>::max();
  }
  const std::int32_t product =
      static_cast<std::int32_t>(a) * static_cast<std::int32_t>(b);
  // Integer division truncates toward zero, unlike an arithmetic right shift.
  return static_cast<std::int16_t>(product / (1 << 15));
}
// Float-path hard-swish: out = x * relu6(x + 3) / 6, elementwise over
// `MatchingFlatSize(input_shape, output_shape)` elements.
//
// Fix vs. previous version: the element was read into `const float in`
// regardless of T, silently truncating wider floating-point types; the
// computation now stays in T.
template <typename T>
inline void HardSwish(const RuntimeShape& input_shape, const T* input_data,
                      const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Float");
  const auto flat_size = MatchingFlatSize(input_shape, output_shape);
  for (decltype(flat_size) i = 0; i < flat_size; ++i) {
    const T in = input_data[i];
    // relu6(in + 3) clamps to [0, 6]; dividing by 6 maps it onto [0, 1].
    output_data[i] =
        in * std::min(static_cast<T>(6), std::max(static_cast<T>(0), in + 3)) /
        6;
  }
}
// Quantized hard-swish: out = x * relu6(x + 3) / 6, computed entirely in
// 16-bit fixed point. All multipliers/exponents in `params` have been
// pre-computed offline from the input/output quantization scales.
// T is the quantized storage type of input and output (e.g. uint8_t/int8_t).
template <typename T>
inline void HardSwish(const HardSwishParams& params,
                      const RuntimeShape& input_shape, const T* input_data,
                      const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("ReferenceHardSwish/Quantized");

  const int flat_size = MatchingFlatSize(input_shape, output_shape);

  for (int i = 0; i < flat_size; i++) {
    const int16_t input_value = input_data[i] - params.input_zero_point;
    // Left-shift as much as we can without overflow/saturation to put
    // significant bits in the high bits of our 16-bit fixedpoint values, so
    // that fixed-point approximate computations below are as accurate as
    // possible.
    const int16_t input_value_on_hires_input_scale = input_value * (1 << 7);
    // Compute the input value on essentially the output scale, just not
    // right-shifted yet. This is the value that we'll use in the (x >= +3)
    // case, and that in the general case we'll multiply against the "relu-ish"
    // fixed-point multiplier in [0, 1].
    const int16_t input_value_on_preshift_output_scale =
        gemmlowp::SaturatingRoundingDoublingHighMul(
            input_value_on_hires_input_scale,
            params.output_multiplier_fixedpoint_int16);
    // Now compute the "relu-ish multiplier". In the (-3 <= x <= +3) case, that
    // is just an affine rescaling of x from [-3, 3] to [0, 1]. In the general
    // case, it is just that plus saturation at the boundaries of [-3, 3].
    // First, we rescale from [-3, 3] to [-1, 1], saturating.
    // That is done by rescaling the input value with a fixed-point multiplier
    // (reluish_multiplier_fixedpoint) and bit-shift such that we represent
    // that input value on the scale where the real value 3.0f is represented
    // by the quantized value 32768. (+32768 is actually not representable as
    // int16_t, so this saturates at +32767, and that is seen empirically to be
    // a negligible contribution to numerical error/bias).
    //
    // This code is careful to correctly implement any magnitude of multiplier,
    // involving either a right shift or a left shift, with correct saturation
    // behavior in the left-shift case. This forces this code to be more
    // complicated, but is necessary for real applications: a partially
    // trained quantized MobileNet v3-small model that motivated this code
    // exhibits some large [min, max] range boundaries, of the order of
    // magnitude of 10 or 100 depending on layers.
    //
    // The next few lines are basically just an ordinary
    // MultiplyByQuantizedMultiplier, except that we are more careful here
    // about the fine details of saturation when left-shifting, because here
    // overflow in left-shift is a common case, not an anomaly as
    // MultiplyByQuantizedMultiplier assumes.
    int16_t reluish_value = input_value_on_hires_input_scale;
    // Shift left, saturating, as much as we can while ensuring that this
    // saturation will not contribute to the result. That is, left shift amount
    // reduced by 1.
    if (params.reluish_multiplier_exponent > 0) {
      reluish_value = SaturatingLeftShift(
          reluish_value, params.reluish_multiplier_exponent - 1);
    }
    // Apply the fixed-point multiplier, dividing the value by a divisor
    // ranging in [1, 2].
    reluish_value = gemmlowp::SaturatingRoundingDoublingHighMul(
        reluish_value, params.reluish_multiplier_fixedpoint_int16);
    // Apply the last bit of left-shift. Thus, in the left-shifting case, if
    // any saturation affects the result, it is happening here --- any
    // saturation having occurred above is overwritten here, not affecting the
    // result.
    if (params.reluish_multiplier_exponent > 0) {
      reluish_value = SaturatingLeftShift(reluish_value, 1);
    }
    // Shift right, in the right-shifting case.
    if (params.reluish_multiplier_exponent < 0) {
      reluish_value = gemmlowp::RoundingDivideByPOT(
          reluish_value, -params.reluish_multiplier_exponent);
    }
    // At this point we have rescaled the value into a 16bit fixedpoint
    // reluish_value in [-1, 1].
    // We now convert that to a 16bit fixedpoint value in [0, 1].
    reluish_value = (reluish_value + (1 << 15)) >> 1;
    // Use of SaturatingDoublingHighMul here is important to cancel the biases
    // from the above SaturatingRoundingDoublingHighMul.
    //
    // On a partially trained MobileNet-v3-small,
    //
    //                                       | bias on    | ImageNet
    //                                       | quantized  | Top-1
    // Operation used here                   | values     | accuracy (50k)
    // --------------------------------------+------------+-----------
    // SaturatingDoublingHighMul             | -0.0024    | 58.920
    // SaturatingRoundingDoublingHighMul     | -0.0067    | 58.064
    //
    // In activations_test, this is covered by this testcase:
    //     QuantizedActivationsOpTest.HardSwishBias
    //
    const int16_t preshift_output_value = SaturatingDoublingHighMul(
        reluish_value, input_value_on_preshift_output_scale);
    // We were so far operating on the pre-shift output scale. Now we finally
    // apply that output shift, arriving at the final output scale.
    int16_t output_value = gemmlowp::RoundingDivideByPOT(
        preshift_output_value, -params.output_multiplier_exponent);
    output_value += params.output_zero_point;
    // Clamp into the representable range of T before storing.
    output_value =
        std::min<int16_t>(output_value, std::numeric_limits<T>::max());
    output_value =
        std::max<int16_t>(output_value, std::numeric_limits<T>::min());
    output_data[i] = output_value;
  }
}
} // namespace reference_ops
} // namespace tflite
#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_ACTIVATIONS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/hard_swish.h | C++ | apache-2.0 | 8,259 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_integer_ops {
// Debug-checks that quantized-add parameters are internally consistent: the
// fused-activation bounds are ordered, and each (negated) input offset fits
// in the int8 range. Input offset is the negative of the input zero point;
// activation tensors are asymmetric quantized, spanning the full int8 range.
inline void CheckArithmeticParams(const ArithmeticParams& params) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  constexpr int32_t kInt8Min = std::numeric_limits<int8_t>::min();
  constexpr int32_t kInt8Max = std::numeric_limits<int8_t>::max();
  TFLITE_DCHECK_GE(-params.input1_offset, kInt8Min);
  TFLITE_DCHECK_LE(-params.input1_offset, kInt8Max);
  TFLITE_DCHECK_GE(-params.input2_offset, kInt8Min);
  TFLITE_DCHECK_LE(-params.input2_offset, kInt8Max);
}
// Applies `binary_func` elementwise over two equally-sized int8 buffers of
// length `size`, writing into `output_data`.
// `check_arithmetic_params` validates `params` before the loop.
inline void ElementWise(
    int size, const ArithmeticParams& params, const int8_t* input1_data,
    const int8_t* input2_data, int8_t* output_data,
    void (*check_arithmetic_params)(const ArithmeticParams&),
    int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) {
  // Use the caller-supplied validator. The previous version hard-coded
  // CheckArithmeticParams and left this parameter unused; all callers in this
  // file pass CheckArithmeticParams, so behavior is unchanged.
  check_arithmetic_params(params);
  for (int i = 0; i < size; ++i) {
    output_data[i] = binary_func(input1_data[i], input2_data[i], params);
  }
}
// Applies `binary_func` elementwise over two int8 tensors with broadcasting
// across up to 4 dimensions (shapes are right-aligned and extended to rank 4),
// writing the result into `output_data`.
// NOTE(review): `check_arithmetic_params` is accepted but never invoked here,
// so no parameter validation happens on this path — confirm whether it was
// intended to be called before the loops.
inline void BroadcastBinaryFunction4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const int8_t* input1_data, const RuntimeShape& input2_shape,
    const int8_t* input2_data, const RuntimeShape& output_shape,
    int8_t* output_data,
    void (*check_arithmetic_params)(const ArithmeticParams&),
    int8_t (*binary_func)(int8_t, int8_t, const ArithmeticParams&)) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          output_data[Offset(extended_output_shape, b, y, x, c)] = binary_func(
              input1_data[SubscriptToIndex(desc1, b, y, x, c)],
              input2_data[SubscriptToIndex(desc2, b, y, x, c)], params);
        }
      }
    }
  }
}
// Adds one pair of quantized int8 values: recenter by the input offsets,
// bring both operands to a shared high-precision scale, rescale each by its
// own multiplier, sum, requantize to the output scale, and clamp to the fused
// activation range.
inline int8_t AddFunc(int8_t x, int8_t y, const ArithmeticParams& params) {
  // Recentered inputs, lifted to higher precision via left_shift.
  const int32_t shifted_x =
      (params.input1_offset + x) * (1 << params.left_shift);
  const int32_t shifted_y =
      (params.input2_offset + y) * (1 << params.left_shift);
  // Rescale each operand onto the common accumulation scale.
  const int32_t scaled_x = MultiplyByQuantizedMultiplierSmallerThanOneExp(
      shifted_x, params.input1_multiplier, params.input1_shift);
  const int32_t scaled_y = MultiplyByQuantizedMultiplierSmallerThanOneExp(
      shifted_y, params.input2_multiplier, params.input2_shift);
  // Requantize the raw sum onto the output scale and recenter.
  const int32_t requantized =
      MultiplyByQuantizedMultiplierSmallerThanOneExp(
          scaled_x + scaled_y, params.output_multiplier, params.output_shift) +
      params.output_offset;
  // Clamp to the fused activation bounds before narrowing to int8.
  const int32_t clamped =
      std::min(params.quantized_activation_max,
               std::max(params.quantized_activation_min, requantized));
  return static_cast<int8_t>(clamped);
}
// Element-wise add that can often be used for inner loop of broadcast add as
// well as the non-broadcast add.
// Validates `params` (via CheckArithmeticParams) and applies AddFunc to each
// of the `size` element pairs.
inline void AddElementwise(int size, const ArithmeticParams& params,
                           const int8_t* input1_data, const int8_t* input2_data,
                           int8_t* output_data) {
  ElementWise(size, params, input1_data, input2_data, output_data,
              CheckArithmeticParams, AddFunc);
}
// Quantized int8 addition for tensors of identical element count (no
// broadcasting); see BroadcastAdd4DSlow for the broadcasting variant.
inline void Add(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int8_t* input1_data,
                const RuntimeShape& input2_shape, const int8_t* input2_data,
                const RuntimeShape& output_shape, int8_t* output_data) {
  CheckArithmeticParams(params);
  // DCHECKs that all three shapes have the same flat size.
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  AddElementwise(flat_size, params, input1_data, input2_data, output_data);
}
// Quantized int8 addition with up-to-4D broadcasting, delegating to the
// generic (slow) broadcast driver with AddFunc as the per-element op.
inline void BroadcastAdd4DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const int8_t* input1_data,
                               const RuntimeShape& input2_shape,
                               const int8_t* input2_data,
                               const RuntimeShape& output_shape,
                               int8_t* output_data) {
  BroadcastBinaryFunction4DSlow(params, input1_shape, input1_data, input2_shape,
                                input2_data, output_shape, output_data,
                                CheckArithmeticParams, AddFunc);
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_ADD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/add.h | C++ | apache-2.0 | 6,598 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Fixed-point per-channel-quantization convolution reference kernel.
// int8 activations and filter, optional int32 bias, int8 output. Tensors are
// NHWC; the filter is laid out [out_channel, filter_y, filter_x, in_channel].
// `output_multiplier` / `output_shift` supply one requantization pair per
// output channel.
inline void ConvPerChannel(
    const ConvParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data) {
  // Get parameters.
  const int32_t input_offset = params.input_offset;  // r = s(q - Z)
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int32_t output_offset = params.output_offset;

  // Set min and max value of the output.
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;

  // Consistency check.
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }

  // Check dimensions of the tensors.
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      // Top-left corner of the receptive field in input coordinates.
      const int in_y_origin = (out_y * stride_height) - pad_height;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          int32_t acc = 0;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            const int in_y = in_y_origin + dilation_height_factor * filter_y;
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              const int in_x = in_x_origin + dilation_width_factor * filter_x;

              // Zero padding by omitting the areas outside the image.
              const bool is_point_inside_image =
                  (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                  (in_y < input_height);

              if (!is_point_inside_image) {
                continue;
              }

              for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
                int32_t input_val = input_data[Offset(input_shape, batch, in_y,
                                                      in_x, in_channel)];
                int32_t filter_val = filter_data[Offset(
                    filter_shape, out_channel, filter_y, filter_x, in_channel)];
                // Accumulate with 32 bits accumulator.
                // In the nudging process during model quantization, we force
                // real value of 0.0 be represented by a quantized value. This
                // guarantees that the input_offset is a int8_t, even though
                // it is represented using int32_t. int32_t += int8_t *
                // (int8_t - int8_t) so the highest value we can get from each
                // accumulation is [-127, 127] * ([-128, 127] -
                // [-128, 127]), which is [-32512, 32512]. log2(32512)
                // = 14.98, which means we can accumulate at least 2^16
                // multiplications without overflow. The accumulator is
                // applied to a filter so the accumulation logic will hold as
                // long as the filter size (filter_y * filter_x * in_channel)
                // does not exceed 2^16, which is the case in all the models
                // we have seen so far.
                // TODO(b/174275578): Add a check to make sure the
                // accumulator depth is smaller than 2^16.
                acc += filter_val * (input_val + input_offset);
              }
            }
          }

          if (bias_data) {
            acc += bias_data[out_channel];
          }
          // Requantize to the output scale with this channel's multiplier,
          // recenter on the output zero point, then clamp to the fused
          // activation range.
          acc = MultiplyByQuantizedMultiplier(
              acc, output_multiplier[out_channel], output_shift[out_channel]);
          acc += output_offset;
          acc = std::max(acc, output_activation_min);
          acc = std::min(acc, output_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              static_cast<int8_t>(acc);
        }
      }
    }
  }
}
// Fixed-point per-channel-quantization convolution reference kernel.
// 16-bit data and 8-bit filter.
// int16 activations with int64 bias and int16 output. Note that this variant
// never reads params.input_offset or params.output_offset: 16-bit
// quantization here is symmetric (zero point 0), so no recentering is done.
inline void ConvPerChannel(
    const ConvParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int16_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const std::int64_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data) {
  // Get parameters.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;

  // Set min and max value of the output.
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;

  // Consistency check.
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }

  // Check dimensions of the tensors.
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      // Top-left corner of the receptive field in input coordinates.
      const int in_y_origin = (out_y * stride_height) - pad_height;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          std::int64_t acc = 0;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            const int in_y = in_y_origin + dilation_height_factor * filter_y;
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              const int in_x = in_x_origin + dilation_width_factor * filter_x;

              // Zero padding by omitting the areas outside the image.
              const bool is_point_inside_image =
                  (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                  (in_y < input_height);

              if (!is_point_inside_image) {
                continue;
              }

              for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
                int32_t input_val = input_data[Offset(input_shape, batch, in_y,
                                                      in_x, in_channel)];
                int32_t filter_val = filter_data[Offset(
                    filter_shape, out_channel, filter_y, filter_x, in_channel)];
                // Accumulate with 64 bits accumulator.
                // int64_t += int8_t * int16_t so the highest value we can
                // get from each accumulation is [-127, 127] * ([-32768,
                // 32767] -
                // [-32768, 32767]), which is [-8322945, 8322945].
                // log2(8322945) = 22.99.
                acc += filter_val * input_val;
              }
            }
          }
          if (bias_data) {
            acc += bias_data[out_channel];
          }
          // Requantize the 64-bit accumulator to the int16 output scale with
          // this channel's multiplier, then clamp to the activation range.
          int32_t scaled_acc = MultiplyByQuantizedMultiplier(
              acc, output_multiplier[out_channel], output_shift[out_channel]);
          scaled_acc = std::max(scaled_acc, output_activation_min);
          scaled_acc = std::min(scaled_acc, output_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              static_cast<int16_t>(scaled_acc);
        }
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h | C++ | apache-2.0 | 10,470 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Reference depthwise convolution for int8 activations with per-channel
// quantized int8 filters.
//
// Layouts (NHWC-style):
//   input:  [batch, in_h, in_w, in_channel]
//   filter: [1, filter_h, filter_w, out_channel]
//   output: [batch, out_h, out_w, out_channel]
// where out_channel = in_channel * depth_multiplier + m (see the inner loop).
//
// output_multiplier / output_shift hold one fixed-point requantization pair
// per output channel. bias_data may be null; when present it is added to the
// raw accumulator before requantization.
inline void DepthwiseConvPerChannel(
    const DepthwiseParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data) {
  // Get parameters.
  // TODO(b/141565753): Re-introduce ScopedProfilingLabel on Micro.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const int32_t input_offset = params.input_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  // Check dimensions of the tensors.
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  // Each input channel expands into depth_multiplier output channels.
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
          for (int m = 0; m < depth_multiplier; ++m) {
            const int output_channel = m + in_channel * depth_multiplier;
            // Top-left corner of the filter window in input coordinates;
            // may be negative when inside the padded border.
            const int in_x_origin = (out_x * stride_width) - pad_width;
            const int in_y_origin = (out_y * stride_height) - pad_height;
            int32_t acc = 0;
            for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                const int in_x = in_x_origin + dilation_width_factor * filter_x;
                const int in_y =
                    in_y_origin + dilation_height_factor * filter_y;
                // Zero padding by omitting the areas outside the image.
                const bool is_point_inside_image =
                    (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                    (in_y < input_height);
                if (is_point_inside_image) {
                  int32_t input_val = input_data[Offset(
                      input_shape, batch, in_y, in_x, in_channel)];
                  int32_t filter_val = filter_data[Offset(
                      filter_shape, 0, filter_y, filter_x, output_channel)];
                  // Accumulate with 32 bits accumulator.
                  // In the nudging process during model quantization, we force
                  // real value of 0.0 be represented by a quantized value. This
                  // guarantees that the input_offset is a int8_t, even though
                  // it is represented using int32_t. int32_t += int8_t *
                  // (int8_t - int8_t) so the highest value we can get from each
                  // accumulation is [-127, 127] * ([-128, 127] -
                  // [-128, 127]), which is [-32512, 32512]. log2(32512)
                  // = 14.98, which means we can accumulate at least 2^16
                  // multiplications without overflow. The accumulator is
                  // applied to a filter so the accumulation logic will hold as
                  // long as the filter size (filter_y * filter_x * in_channel)
                  // does not exceed 2^16, which is the case in all the models
                  // we have seen so far.
                  // TODO(b/174275578): Add a check to make sure the
                  // accumulator depth is smaller than 2^16.
                  acc += filter_val * (input_val + input_offset);
                }
              }
            }
            if (bias_data) {
              acc += bias_data[output_channel];
            }
            // Requantize with the per-channel multiplier/shift, re-center on
            // the output zero point, then clamp to the fused activation range.
            acc = MultiplyByQuantizedMultiplier(
                acc, output_multiplier[output_channel],
                output_shift[output_channel]);
            acc += output_offset;
            acc = std::max(acc, output_activation_min);
            acc = std::min(acc, output_activation_max);
            output_data[Offset(output_shape, batch, out_y, out_x,
                               output_channel)] = static_cast<int8_t>(acc);
          }
        }
      }
    }
  }
}
// Reference depthwise convolution for 16-bit activations with 8-bit
// per-channel quantized filters ("16x8" quantization).
//
// 16-bit activations are symmetrically quantized (zero point 0), so no
// input/output offsets are applied; the accumulator and bias are 64-bit.
//
// Layouts (NHWC-style):
//   input:  [batch, in_h, in_w, in_channel]
//   filter: [1, filter_h, filter_w, out_channel]
//   output: [batch, out_h, out_w, out_channel]
// where out_channel = in_channel * depth_multiplier + m.
//
// output_multiplier / output_shift hold one fixed-point requantization pair
// per output channel. bias_data may be null.
inline void DepthwiseConvPerChannel(
    const DepthwiseParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int16_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const std::int64_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data) {
  // Get parameters.
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  // Check dimensions of the tensors.
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      // The window origin depends only on the output row/column, so hoist it
      // out of the channel and depth-multiplier loops (it was previously
      // recomputed per output channel). May be negative inside the padding.
      const int in_y_origin = (out_y * stride_height) - pad_height;
      for (int out_x = 0; out_x < output_width; ++out_x) {
        const int in_x_origin = (out_x * stride_width) - pad_width;
        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
          for (int m = 0; m < depth_multiplier; ++m) {
            const int output_channel = m + in_channel * depth_multiplier;
            std::int64_t acc = 0;
            for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
              const int in_y =
                  in_y_origin + dilation_height_factor * filter_y;
              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                const int in_x = in_x_origin + dilation_width_factor * filter_x;
                // Zero padding by omitting the areas outside the image.
                const bool is_point_inside_image =
                    (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                    (in_y < input_height);
                if (!is_point_inside_image) {
                  continue;
                }
                int32_t input_val = input_data[Offset(input_shape, batch, in_y,
                                                      in_x, in_channel)];
                int32_t filter_val = filter_data[Offset(
                    filter_shape, 0, filter_y, filter_x, output_channel)];
                // Accumulate with 64 bits accumulator.
                // We assume maximum of 2^16 accumulations as with the 8-bit
                // case so actually the value in the accumulator should not
                // exceed 40 bits
                acc += static_cast<int64_t>(filter_val) *
                       static_cast<int64_t>(input_val);
              }
            }
            if (bias_data) {
              acc += bias_data[output_channel];
            }
            // Requantize per output channel and clamp to the fused activation
            // range before narrowing back to int16.
            int32_t scaled_acc = MultiplyByQuantizedMultiplier(
                acc, output_multiplier[output_channel],
                output_shift[output_channel]);
            scaled_acc = std::max(scaled_acc, output_activation_min);
            scaled_acc = std::min(scaled_acc, output_activation_max);
            output_data[Offset(output_shape, batch, out_y, out_x,
                               output_channel)] =
                static_cast<int16_t>(scaled_acc);
          }
        }
      }
    }
  }
}
// "Hybrid" depthwise convolution: int8 inputs and filters, float outputs.
//
// The integer accumulator for each output element is rescaled back to float
// using a per-batch input scale (scaling_factors_ptr) combined with a
// per-output-channel filter scale (per_channel_scale); the bias and the
// activation clamp are then applied in float. input_offset is likewise
// indexed per batch — presumably produced by per-batch dynamic quantization
// of the input; verify against the caller.
inline void DepthwiseConvHybridPerChannel(
    const DepthwiseParams& params, float* scaling_factors_ptr,
    const RuntimeShape& input_shape, const int8_t* input_data,
    const RuntimeShape& filter_shape, const int8_t* filter_data,
    const RuntimeShape& bias_shape, const float* bias_data,
    const RuntimeShape& output_shape, float* output_data,
    const float* per_channel_scale, int32_t* input_offset) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int dilation_width_factor = params.dilation_width_factor;
  const int dilation_height_factor = params.dilation_height_factor;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  const int depth_multiplier = params.depth_multiplier;
  const float output_activation_min = params.float_activation_min;
  const float output_activation_max = params.float_activation_max;
  // Check dimensions of the tensors.
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int output_depth = MatchingDim(filter_shape, 3, output_shape, 3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int input_depth = input_shape.Dims(3);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int bias_depth = bias_shape.FlatSize();
  // Each input channel expands into depth_multiplier output channels.
  TFLITE_DCHECK_EQ(output_depth, input_depth * depth_multiplier);
  TFLITE_DCHECK_EQ(bias_depth, output_depth);
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
          for (int m = 0; m < depth_multiplier; ++m) {
            const int output_channel = m + in_channel * depth_multiplier;
            // Top-left corner of the filter window in input coordinates;
            // may be negative when inside the padded border.
            const int in_x_origin = (out_x * stride_width) - pad_width;
            const int in_y_origin = (out_y * stride_height) - pad_height;
            int32_t acc = 0;
            for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
              for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
                const int in_x = in_x_origin + dilation_width_factor * filter_x;
                const int in_y =
                    in_y_origin + dilation_height_factor * filter_y;
                // Zero padding by omitting the areas outside the image.
                const bool is_point_inside_image =
                    (in_x >= 0) && (in_x < input_width) && (in_y >= 0) &&
                    (in_y < input_height);
                if (is_point_inside_image) {
                  int32_t input_val = input_data[Offset(
                      input_shape, batch, in_y, in_x, in_channel)];
                  int32_t filter_val = filter_data[Offset(
                      filter_shape, 0, filter_y, filter_x, output_channel)];
                  // Integer accumulation; the per-batch input offset re-centers
                  // the dynamically quantized input values.
                  acc += filter_val * (input_val - input_offset[batch]);
                }
              }
            }
            // Convert the integer accumulator back to real-valued units using
            // the combined (per-channel x per-batch) scale.
            float acc_float = static_cast<float>(acc);
            acc_float *=
                per_channel_scale[output_channel] * scaling_factors_ptr[batch];
            // NOTE(review): the bounds check looks redundant given the
            // bias_depth == output_depth DCHECK above — kept as-is.
            if (bias_data && output_channel < bias_depth) {
              acc_float += bias_data[output_channel];
            }
            output_data[Offset(output_shape, batch, out_y, out_x,
                               output_channel)] =
                ActivationFunctionWithMinMax(acc_float, output_activation_min,
                                             output_activation_max);
          }
        }
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEPTHWISE_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/depthwise_conv.h | C++ | apache-2.0 | 14,578 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_integer_ops {
// Converts quantized values back to float:
//   result = scale * (value - zero_point)
// The product is computed in double precision before narrowing to float.
// T is the quantized storage type (e.g. int8_t, uint8_t, int16_t).
template <typename T>
inline void Dequantize(const tflite::DequantizationParams& op_params,
                       const RuntimeShape& input_shape, const T* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  const int32 zero_point = op_params.zero_point;
  const double scale = op_params.scale;
  for (int idx = 0; idx < num_elements; ++idx) {
    const double dequantized =
        scale * (static_cast<int32>(input_data[idx]) - zero_point);
    output_data[idx] = static_cast<float>(dequantized);
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_DEQUANTIZE_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/dequantize.h | C++ | apache-2.0 | 1,740 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Quantized (int8) fully-connected reference kernel with a single per-tensor
// requantization pair:
//   out[b, c] = clamp( requantize( sum_d (W[c, d] + filter_offset) *
//                                        (x[b, d] + input_offset)
//                                  + bias[c] )
//                      + output_offset )
// bias_data may be null. filter rows beyond output_depth (if any) are unused.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data) {
  const int32_t input_offset = params.input_offset;
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_offset = params.output_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t act_min = params.quantized_activation_min;
  const int32_t act_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_LE(act_min, act_max);
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = output_shape.Dims(0);
  const int output_depth = output_shape.Dims(1);
  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int batch = 0; batch < batches; ++batch) {
    // One contiguous row of activations per batch entry.
    const int8_t* input_row = input_data + batch * accum_depth;
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      // One contiguous row of weights per output channel.
      const int8_t* filter_row = filter_data + out_c * accum_depth;
      int32_t accum = 0;
      for (int d = 0; d < accum_depth; ++d) {
        accum += (static_cast<int32_t>(filter_row[d]) + filter_offset) *
                 (static_cast<int32_t>(input_row[d]) + input_offset);
      }
      if (bias_data != nullptr) {
        accum += bias_data[out_c];
      }
      // Requantize, re-center on the output zero point, clamp, narrow.
      int32_t result =
          MultiplyByQuantizedMultiplier(accum, output_multiplier, output_shift);
      result += output_offset;
      result = std::min(std::max(result, act_min), act_max);
      output_data[batch * output_depth + out_c] = static_cast<int8_t>(result);
    }
  }
}
// Fully-connected reference kernel for 16-bit activations with 8-bit weights
// ("16x8" quantization). Activations are symmetric (zero point 0), so only
// the filter offset is applied; accumulation is 64-bit and the bias is int64.
//
//   out[b, c] = clamp( requantize( sum_d (W[c, d] + filter_offset) * x[b, d]
//                                  + bias[c] ) )
// bias_data may be null.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const int16_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int64_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data) {
  const int32_t filter_offset = params.weights_offset;
  const int32_t output_multiplier = params.output_multiplier;
  const int output_shift = params.output_shift;
  const int32_t output_activation_min = params.quantized_activation_min;
  const int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_GE(filter_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 2);
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int filter_dim_count = filter_shape.DimensionsCount();
  const int batches = output_shape.Dims(0);
  const int output_depth = output_shape.Dims(1);
  TFLITE_DCHECK_LE(output_depth, filter_shape.Dims(filter_dim_count - 2));
  const int accum_depth = filter_shape.Dims(filter_dim_count - 1);
  for (int b = 0; b < batches; ++b) {
    for (int out_c = 0; out_c < output_depth; ++out_c) {
      // 64-bit accumulator: int8 * int16 products cannot overflow it for any
      // realistic accumulation depth.
      int64_t acc = 0;
      for (int d = 0; d < accum_depth; ++d) {
        int32_t input_val = input_data[b * accum_depth + d];
        int32_t filter_val = filter_data[out_c * accum_depth + d];
        acc += (filter_val + filter_offset) * input_val;
      }
      if (bias_data) {
        acc += bias_data[out_c];
      }
      // Requantize and clamp to the fused activation range before narrowing
      // back to int16. No output offset: 16-bit outputs are symmetric.
      int32_t acc_scaled =
          MultiplyByQuantizedMultiplier(acc, output_multiplier, output_shift);
      acc_scaled = std::max(acc_scaled, output_activation_min);
      acc_scaled = std::min(acc_scaled, output_activation_max);
      output_data[out_c + output_depth * b] = static_cast<int16_t>(acc_scaled);
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_FULLY_CONNECTED_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/fully_connected.h | C++ | apache-2.0 | 4,887 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Quantized int8 L2 normalization over the innermost `depth` dimension:
//   output = input / sqrt(sum(input^2))
// computed entirely in fixed point. The input is laid out as
// [outer_size, depth]; each outer slice is normalized independently.
inline void L2Normalization(int32_t input_zero_point, int32_t outer_size,
                            int32_t depth, const int8_t* input_data,
                            int8_t* output_data) {
  static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
  static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
  // The output scale must be in sync with Prepare().
  // Output is in 1/128 scale so the actual output range is nudged from [-1, 1]
  // to [-1, 127/128].
  static constexpr int32_t kOutputScale = 7;
  for (int outer_index = 0; outer_index < outer_size; ++outer_index) {
    // int32_t = (int8_t - int8_t) ^ 2.
    // ([-128, 127] - [-128, 127]) ^ 2 = [0, (2^8 - 1)^2] so the accumulator is
    // safe from overflowing in at least 2^16 steps.
    int32_t acc = 0;
    for (int inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input =
          input_data[depth * outer_index + inner_index] - input_zero_point;
      acc += input * input;
    }
    // Fixed-point multiplier/shift approximating 1/sqrt(acc).
    int32_t inv_l2norm_multiplier;
    int inv_l2norm_shift;
    GetInvSqrtQuantizedMultiplierExp(acc, kReverseShift, &inv_l2norm_multiplier,
                                     &inv_l2norm_shift);
    for (int inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input =
          input_data[depth * outer_index + inner_index] - input_zero_point;
      // Rescale and downcast. Rescale is folded into the division.
      int32_t output_in_q24 = MultiplyByQuantizedMultiplier(
          input, inv_l2norm_multiplier, inv_l2norm_shift + kOutputScale);
      // Saturate to the int8 range before narrowing.
      output_in_q24 =
          std::min(static_cast<int32_t>(kMaxInt8),
                   std::max(static_cast<int32_t>(kMinInt8), output_in_q24));
      output_data[depth * outer_index + inner_index] =
          static_cast<int8_t>(output_in_q24);
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_L2NORMALIZATION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/l2normalization.h | C++ | apache-2.0 | 2,929 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOG_SOFTMAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOG_SOFTMAX_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Quantized int8 log-softmax over the innermost `depth` dimension, one outer
// slice at a time, using gemmlowp fixed-point arithmetic:
//   log_softmax(x_i) = (x_i - max) - log(sum_j exp(x_j - max))
// Input diffs are rescaled to Q5.26 ("F5"), exponentials are accumulated in
// Q12.19 ("F12"), and results are requantized to the fixed output scale
// (1/16, zero point 127). Diffs below the adjusted threshold map to the
// minimum representable output.
inline void LogSoftmax(int32_t input_multiplier, int32_t input_shift,
                       int32_t reverse_multiplier, int32_t reverse_shift,
                       int32_t diff_min, int32_t outer_size, int32_t depth,
                       const int8* input_data, int8* output_data) {
  static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
  static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
  static constexpr int32_t kMinInt32 = std::numeric_limits<int32_t>::min();
  // [-16, 0] is mapped to [-128, 127] with 1/16 as scale and 127 as zero
  // point. This nudges the output to [-255/16, 0].
  static constexpr int32_t kOutputZeroPoint = 127;
  // All IntegerBits must agree with Prepare function.
  // Input is chosen as Q5.26 so exp(-1 * 2^5 * 2^-1) = exp(-16) is negligible.
  static constexpr int kInputIntegerBits = 5;
  static constexpr int kAccumulationIntegerBits = 12;
  static constexpr int kOutputIntegerBits = 4;
  using F5 = gemmlowp::FixedPoint<int32, kInputIntegerBits>;
  using F12 = gemmlowp::FixedPoint<int32, kAccumulationIntegerBits>;
  for (int outer_index = 0; outer_index < outer_size; ++outer_index) {
    // Max-subtraction for numerical stability: all diffs become <= 0.
    int8 max_in_row = kMinInt8;
    for (int inner_index = 0; inner_index < depth; ++inner_index) {
      max_in_row =
          std::max(max_in_row, input_data[outer_index * depth + inner_index]);
    }
    // Accumulator "sum_of_exps_in_q12" is safe from overflowing in 2^12 steps.
    F12 sum_of_exps_in_q12 = F12::FromRaw(0);
    for (int inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      // Diffs below diff_min contribute negligibly to the sum and are skipped.
      if (input_diff >= diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_shift);
        sum_of_exps_in_q12 =
            sum_of_exps_in_q12 +
            gemmlowp::Rescale<kAccumulationIntegerBits>(
                exp_on_negative_values(F5::FromRaw(input_diff_in_q5)));
      }
    }
    const int32_t log_sum_of_exps_in_q5 =
        log_x_for_x_greater_than_or_equal_to_1<kInputIntegerBits>(
            sum_of_exps_in_q12)
            .raw();
    // Potentially reduced the valid range. shifted_log_sum_of_exps_in_q5 is
    // smallest representable in Q5.26 plus the log_sum_of_exps.
    const int32_t shifted_log_sum_of_exps_in_q5 =
        log_sum_of_exps_in_q5 + kMinInt32;
    const int32_t adjusted_diff_min = std::max(
        diff_min - 1,
        MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5,
                                      reverse_multiplier, -reverse_shift));
    for (int inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      // Note use of > below instead of >= above.
      if (input_diff > adjusted_diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_shift);
        // Rescale and downcast.
        int32_t output_in_q27 =
            gemmlowp::RoundingDivideByPOT(
                (input_diff_in_q5 - log_sum_of_exps_in_q5),
                31 - kInputIntegerBits - kOutputIntegerBits) +
            kOutputZeroPoint;
        // Saturate to the int8 range.
        output_in_q27 =
            std::max(std::min(output_in_q27, static_cast<int32_t>(kMaxInt8)),
                     static_cast<int32_t>(kMinInt8));
        output_data[outer_index * depth + inner_index] =
            static_cast<int8_t>(output_in_q27);
      } else {
        // Below the threshold: emit the smallest representable log-prob.
        output_data[outer_index * depth + inner_index] = kMinInt8;
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOG_SOFTMAX_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/log_softmax.h | C++ | apache-2.0 | 4,807 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Quantized int8 logistic (sigmoid) using gemmlowp fixed-point math.
//
// Values whose zero-point-adjusted magnitude reaches input_range_radius
// saturate directly to the int8 extremes; everything else is rescaled with
// (input_multiplier, input_left_shift) into Q4.27, evaluated with
// gemmlowp::logistic, and requantized to the fixed output format
// (zero point -128).
inline void Logistic(int32_t input_zero_point, int32_t input_range_radius,
                     int32_t input_multiplier, int32_t input_left_shift,
                     int32_t input_size, const int8_t* input_data,
                     int8_t* output_data) {
  // Integer bits must be in sync with Prepare() function.
  static constexpr int32_t kInputIntegerBits = 4;
  static constexpr int32_t kOutputIntegerBits = 8;
  static constexpr int8_t kMinInt8 = std::numeric_limits<int8_t>::min();
  static constexpr int8_t kMaxInt8 = std::numeric_limits<int8_t>::max();
  static constexpr int32_t kOutputZeroPoint = -128;
  using FixedPoint4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
  for (int i = 0; i < input_size; ++i) {
    const int32_t centered =
        static_cast<int32_t>(input_data[i]) - input_zero_point;
    if (centered <= -input_range_radius) {
      // Deep negative tail: sigmoid ~ 0, the smallest representable output.
      output_data[i] = kMinInt8;
      continue;
    }
    if (centered >= input_range_radius) {
      // Deep positive tail: sigmoid ~ 1, the largest representable output.
      output_data[i] = kMaxInt8;
      continue;
    }
    const int32_t rescaled = MultiplyByQuantizedMultiplier(
        centered, input_multiplier, input_left_shift);
    const int32_t sigmoid_raw =
        gemmlowp::logistic(FixedPoint4::FromRaw(rescaled)).raw();
    // Reduce precision to the output format and re-center on the output zero
    // point, saturating to the int8 range.
    int32_t quantized =
        gemmlowp::RoundingDivideByPOT(sigmoid_raw, 31 - kOutputIntegerBits);
    quantized += kOutputZeroPoint;
    quantized = std::min(std::max(quantized, static_cast<int32_t>(kMinInt8)),
                         static_cast<int32_t>(kMaxInt8));
    output_data[i] = static_cast<int8_t>(quantized);
  }
}
// Quantized int16 logistic (sigmoid) computed by linear interpolation over
// the shared sigmoid_table_uint16 lookup table. Negative inputs use the
// identity sigmoid(-x) = 1 - sigmoid(x).
inline void Logistic(int32_t input_multiplier, int32_t input_left_shift,
                     int32_t input_size, const int16_t* ptr_input_data,
                     int16_t* ptr_output_data) {
  // We use the LUT for sigmoid and take into account, that
  // tanh(x) = 2*sigmoid(2*x) - 1
  // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7].
  // In case of general parameter scale, multiplier 3 is taken into account
  // in TanhPrepare function and it is included in
  // input_multiplier already.
  TFLITE_DCHECK_GE(input_left_shift, 0);
  if (input_multiplier == 0) {  // power of two case
    input_multiplier = 3 << input_left_shift;
    input_left_shift = 0;
  }
  // Rounding term for the shift below (round-half-up); zero when no shift.
  int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0;
  for (int i = 0; i < input_size; ++i, ptr_input_data++, ptr_output_data++) {
    int32_t input_data =
        ((*ptr_input_data) * input_multiplier + round) >> input_left_shift;
    // We do interpolation on unsigned values.
    uint32_t abs_input_data = abs(input_data);
    // We divide by 2 power of 9, because
    // we need to divide by 2 in power of 7 for
    // the input conversion + 1/4 from the scale above.
    // Define uh as uint32_t type not to make this function overflow.
    uint32_t uh = abs_input_data >> 9;
    uint32_t result;
    if (uh >= 255) {
      // Saturate to maximum.
      result = 0x7FFF << 10;
    } else {
      // Linear interpolation between adjacent table entries, keeping 9
      // fractional bits of the input as the interpolation weight.
      uint32_t ua = sigmoid_table_uint16[uh];
      uint32_t ub = sigmoid_table_uint16[uh + 1];
      uint32_t ut = abs_input_data & 0x1ff;
      // Interpolation is done using the fractional bit.
      result = (ua << 9) + ut * (ub - ua);
    }
    // Mirror negative inputs via sigmoid(-x) = 1 - sigmoid(x); the +(1 << 9)
    // terms implement rounding before the final shift.
    result = (input_data >= 0) ? (result + (1 << 9))
                               : ((1 << (16 + 9)) - result + (1 << 9) - 1);
    // Back to 16-bit.
    result >>= 10;
    *ptr_output_data = result;
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_LOGISTIC_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/logistic.h | C++ | apache-2.0 | 4,618 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Quantized mean over the height and width axes (axes {1, 2}) of a 4D
// [batch, height, width, depth] tensor; the output is [batch, 1, 1, depth].
// The sum of zero-point-adjusted inputs is requantized with
// (multiplier, shift), divided by the element count with round-half-away-
// from-zero, re-centered on the output zero point, and clamped.
// integer_type is the quantized storage type (e.g. int8_t, uint8_t, int16_t).
template <typename integer_type>
inline void Mean(const tflite::MeanParams& op_params, int32_t multiplier,
                 int32_t shift, const RuntimeShape& unextended_input_shape,
                 const integer_type* input_data, int32_t input_zero_point,
                 const RuntimeShape& unextended_output_shape,
                 integer_type* output_data, int32_t output_zero_point) {
  // Current implementation only supports dimension equals 4 and simultaneous
  // reduction over width and height.
  TFLITE_CHECK_EQ(unextended_input_shape.DimensionsCount(), 4);
  TFLITE_CHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape input_shape =
      RuntimeShape::ExtendedShape(4, unextended_input_shape);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  const int output_batch = output_shape.Dims(0);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int output_depth = output_shape.Dims(3);
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int num_elements_in_axis = input_width * input_height;
  // Only H/W reduction (in either order) is supported.
  TFLITE_CHECK_EQ(op_params.axis_count, 2);
  TFLITE_CHECK((op_params.axis[0] == 1 && op_params.axis[1] == 2) ||
               (op_params.axis[0] == 2 && op_params.axis[1] == 1));
  TFLITE_CHECK_EQ(output_height, 1);
  TFLITE_CHECK_EQ(output_width, 1);
  static constexpr int32_t kMinInt = std::numeric_limits<integer_type>::min();
  static constexpr int32_t kMaxInt = std::numeric_limits<integer_type>::max();
  for (int out_b = 0; out_b < output_batch; ++out_b) {
    for (int out_d = 0; out_d < output_depth; ++out_d) {
      // Sum of zero-point-adjusted inputs over the whole H x W plane.
      int32_t acc = 0;
      for (int in_h = 0; in_h < input_height; ++in_h) {
        for (int in_w = 0; in_w < input_width; ++in_w) {
          acc += input_data[Offset(input_shape, out_b, in_h, in_w, out_d)] -
                 input_zero_point;
        }
      }
      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      // Integer division with rounding half away from zero.
      acc = acc > 0 ? (acc + num_elements_in_axis / 2) / num_elements_in_axis
                    : (acc - num_elements_in_axis / 2) / num_elements_in_axis;
      acc += output_zero_point;
      acc = std::min(std::max(acc, kMinInt), kMaxInt);
      output_data[Offset(output_shape, out_b, 0, 0, out_d)] =
          static_cast<integer_type>(acc);
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MEAN_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/mean.h | C++ | apache-2.0 | 3,466 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
#include "fixedpoint/fixedpoint.h"
#include "ruy/profiler/instrumentation.h" // from @ruy
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Element-wise quantized multiply over a flat buffer of `size` values.
//
// Each input is shifted by its zero-point offset, the 32-bit product is
// rescaled with the quantized output multiplier/shift, the output offset is
// added, and the result is clamped to the quantized activation range.
template <typename T>
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const T* input1_data, const T* input2_data,
                           T* output_data) {
  for (int idx = 0; idx < size; ++idx) {
    const int32_t lhs = params.input1_offset + input1_data[idx];
    const int32_t rhs = params.input2_offset + input2_data[idx];
    int32_t result =
        params.output_offset +
        MultiplyByQuantizedMultiplier(lhs * rhs, params.output_multiplier,
                                      params.output_shift);
    // Clamp to the fused activation range.
    result = std::max(params.quantized_activation_min, result);
    result = std::min(params.quantized_activation_max, result);
    output_data[idx] = static_cast<T>(result);
  }
}
// Quantized multiply of two tensors with identical shapes (no broadcasting).
// Validates the activation range and shape agreement, then delegates the
// per-element work to MulElementwise.
template <typename T>
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  ruy::profiler::ScopeLabel label("Mul/8bit");
  // All three shapes must describe the same element count.
  const int size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  MulElementwise(size, params, input1_data, input2_data, output_data);
}
// Mul with 16 bit inputs and int8_t outputs.
//
// Inputs are treated as raw gemmlowp Q0.15 fixed-point values (no input
// zero-point offsets are applied in this overload). The product is rounded
// down to the 8-bit output scale; the activation clamp is applied in the
// offset-free domain, and params.output_offset is re-added at the end.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const int16_t* input1_data,
                const RuntimeShape& input2_shape, const int16_t* input2_data,
                const RuntimeShape& output_shape, int8_t* output_data) {
  ruy::profiler::ScopeLabel label("Mul/Int16Int8");
  int32_t output_offset = params.output_offset;
  int32_t output_activation_min = params.quantized_activation_min;
  int32_t output_activation_max = params.quantized_activation_max;
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  // All three shapes must describe the same element count (no broadcasting).
  const int flat_size =
      MatchingElementsSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < flat_size; i++) {
    // F0 uses 0 integer bits, range [-1, 1].
    using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
    F0 unclamped_result =
        F0::FromRaw(input1_data[i]) * F0::FromRaw(input2_data[i]);
    // Drop 8 fractional bits with rounding to reach the int8 output scale.
    int16_t rescaled_result =
        gemmlowp::RoundingDivideByPOT(unclamped_result.raw(), 8);
    // Clamp before re-adding the output offset, so the bounds are expressed
    // in the same offset-free domain as rescaled_result.
    int16_t clamped_result = std::min<int16_t>(
        output_activation_max - output_offset, rescaled_result);
    clamped_result = std::max<int16_t>(output_activation_min - output_offset,
                                       clamped_result);
    output_data[i] = output_offset + clamped_result;
  }
}
// Broadcasting quantized multiply over tensors of rank <= 4.
//
// Reference "slow" strategy: extend both inputs to rank 4, then visit every
// output coordinate and map it back into each input through the broadcast
// descriptors.
template <typename T>
inline void BroadcastMul4DSlow(
    const ArithmeticParams& params, const RuntimeShape& input1_shape,
    const T* input1_data, const RuntimeShape& input2_shape,
    const T* input2_data, const RuntimeShape& output_shape, T* output_data) {
  ruy::profiler::ScopeLabel label("BroadcastMul4DSlow");

  // The input shapes are extended as part of NdArrayDesc initialization.
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);

  for (int d0 = 0; d0 < extended_output_shape.Dims(0); ++d0) {
    for (int d1 = 0; d1 < extended_output_shape.Dims(1); ++d1) {
      for (int d2 = 0; d2 < extended_output_shape.Dims(2); ++d2) {
        for (int d3 = 0; d3 < extended_output_shape.Dims(3); ++d3) {
          const int32_t in1 =
              params.input1_offset +
              input1_data[SubscriptToIndex(desc1, d0, d1, d2, d3)];
          const int32_t in2 =
              params.input2_offset +
              input2_data[SubscriptToIndex(desc2, d0, d1, d2, d3)];
          // Rescale the 32-bit product, add the output offset, then clamp
          // to the quantized activation range.
          int32_t out = params.output_offset +
                        MultiplyByQuantizedMultiplier(in1 * in2,
                                                      params.output_multiplier,
                                                      params.output_shift);
          out = std::max(params.quantized_activation_min, out);
          out = std::min(params.quantized_activation_max, out);
          output_data[Offset(extended_output_shape, d0, d1, d2, d3)] =
              static_cast<T>(out);
        }
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_MUL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/mul.h | C++ | apache-2.0 | 5,760 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const int8_t* input_data,
const RuntimeShape& output_shape, int8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
int32_t acc = 0;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int in_x = in_x_origin + filter_x;
const int in_y = in_y_origin + filter_y;
acc +=
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
filter_count++;
}
}
// Round to the closest integer value.
acc = acc > 0 ? (acc + filter_count / 2) / filter_count
: (acc - filter_count / 2) / filter_count;
acc = std::max(acc, params.quantized_activation_min);
acc = std::min(acc, params.quantized_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
static_cast<int8_t>(acc);
}
}
}
}
}
// Integer (int8) max pooling: each output element is the maximum input
// value inside the (bounds-clamped) pooling window, clamped afterwards to
// the quantized activation range.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int8_t* input_data, const RuntimeShape& output_shape,
                    int8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // The activation bounds must themselves be representable as int8.
  TFLITE_DCHECK_GE(params.quantized_activation_min,
                   std::numeric_limits<int8_t>::min());
  TFLITE_DCHECK_LE(params.quantized_activation_max,
                   std::numeric_limits<int8_t>::max());
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int num_batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int num_channels = MatchingDim(input_shape, 3, output_shape, 3);
  const int in_height = input_shape.Dims(1);
  const int in_width = input_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  const int out_width = output_shape.Dims(2);
  for (int b = 0; b < num_batches; ++b) {
    for (int oy = 0; oy < out_height; ++oy) {
      for (int ox = 0; ox < out_width; ++ox) {
        // Top-left corner of the window in input coordinates (may lie in
        // the padding region, i.e. be negative).
        const int ix0 = ox * params.stride_width - params.padding_values.width;
        const int iy0 =
            oy * params.stride_height - params.padding_values.height;
        // Window extent clamped so only valid input elements are visited.
        const int fx_begin = std::max(0, -ix0);
        const int fx_end = std::min(params.filter_width, in_width - ix0);
        const int fy_begin = std::max(0, -iy0);
        const int fy_end = std::min(params.filter_height, in_height - iy0);
        for (int c = 0; c < num_channels; ++c) {
          int8_t best = std::numeric_limits<int8_t>::lowest();
          for (int fy = fy_begin; fy < fy_end; ++fy) {
            for (int fx = fx_begin; fx < fx_end; ++fx) {
              const int8_t val =
                  input_data[Offset(input_shape, b, iy0 + fy, ix0 + fx, c)];
              if (val > best) best = val;
            }
          }
          // Clamp to the quantized activation range.
          best = std::max<int8_t>(best, params.quantized_activation_min);
          best = std::min<int8_t>(best, params.quantized_activation_max);
          output_data[Offset(output_shape, b, oy, ox, c)] =
              static_cast<int8_t>(best);
        }
      }
    }
  }
}
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const int16_t* input_data,
const RuntimeShape& output_shape,
int16_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
int32_t acc = 0;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int in_x = in_x_origin + filter_x;
const int in_y = in_y_origin + filter_y;
acc +=
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
filter_count++;
}
}
// Round to the closest integer value.
acc = acc > 0 ? (acc + filter_count / 2) / filter_count
: (acc - filter_count / 2) / filter_count;
acc = std::max(acc, params.quantized_activation_min);
acc = std::min(acc, params.quantized_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
static_cast<int16_t>(acc);
}
}
}
}
}
// Integer (int16) max pooling: each output element is the maximum input
// value inside the (bounds-clamped) pooling window, clamped afterwards to
// the quantized activation range.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const int16_t* input_data, const RuntimeShape& output_shape,
                    int16_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  // The activation bounds must themselves be representable as int16.
  TFLITE_DCHECK_GE(params.quantized_activation_min,
                   std::numeric_limits<int16_t>::min());
  TFLITE_DCHECK_LE(params.quantized_activation_max,
                   std::numeric_limits<int16_t>::max());
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int num_batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int num_channels = MatchingDim(input_shape, 3, output_shape, 3);
  const int in_height = input_shape.Dims(1);
  const int in_width = input_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  const int out_width = output_shape.Dims(2);
  for (int b = 0; b < num_batches; ++b) {
    for (int oy = 0; oy < out_height; ++oy) {
      for (int ox = 0; ox < out_width; ++ox) {
        // Top-left corner of the window in input coordinates (may lie in
        // the padding region, i.e. be negative).
        const int ix0 = ox * params.stride_width - params.padding_values.width;
        const int iy0 =
            oy * params.stride_height - params.padding_values.height;
        // Window extent clamped so only valid input elements are visited.
        const int fx_begin = std::max(0, -ix0);
        const int fx_end = std::min(params.filter_width, in_width - ix0);
        const int fy_begin = std::max(0, -iy0);
        const int fy_end = std::min(params.filter_height, in_height - iy0);
        for (int c = 0; c < num_channels; ++c) {
          int16_t best = std::numeric_limits<int16_t>::lowest();
          for (int fy = fy_begin; fy < fy_end; ++fy) {
            for (int fx = fx_begin; fx < fx_end; ++fx) {
              const int16_t val =
                  input_data[Offset(input_shape, b, iy0 + fy, ix0 + fx, c)];
              if (val > best) best = val;
            }
          }
          // Clamp to the quantized activation range.
          best = std::max<int16_t>(best, params.quantized_activation_min);
          best = std::min<int16_t>(best, params.quantized_activation_max);
          output_data[Offset(output_shape, b, oy, ox, c)] =
              static_cast<int16_t>(best);
        }
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_POOLING_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h | C++ | apache-2.0 | 12,291 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
#include <limits>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Int8 tanh evaluated with gemmlowp fixed-point arithmetic.
//
// Inputs at or beyond +/-input_range_radius (after zero-point adjustment)
// saturate directly; everything else is rescaled into a Q4.27 fixed-point
// value, passed through gemmlowp::tanh (Q0.31 result), rounded down to the
// 7-fractional-bit output scale and clamped to int8.
inline void Tanh(int32_t input_zero_point, int32_t input_range_radius,
                 int32_t input_multiplier, int32_t input_shift,
                 const RuntimeShape& input_shape, const int8_t* input_data,
                 const RuntimeShape& output_shape, int8_t* output_data) {
  // Integer bits must be in sync with Prepare() function.
  static constexpr int32_t kInputIntegerBits = 4;
  static constexpr int32_t kOutputScale = 7;
  static constexpr int32_t kMinInt8 = std::numeric_limits<int8_t>::min();
  static constexpr int32_t kMaxInt8 = std::numeric_limits<int8_t>::max();
  using F4 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;

  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; ++i) {
    const int32_t centered =
        static_cast<int32_t>(input_data[i]) - input_zero_point;
    if (centered <= -input_range_radius) {
      // Saturated on the negative side.
      output_data[i] = kMinInt8;
      continue;
    }
    if (centered >= input_range_radius) {
      // Saturated on the positive side.
      output_data[i] = kMaxInt8;
      continue;
    }
    const int32_t input_in_q4 =
        MultiplyByQuantizedMultiplier(centered, input_multiplier, input_shift);
    const int32_t tanh_in_q0 = gemmlowp::tanh(F4::FromRaw(input_in_q4)).raw();
    // Rescale from Q0.31 down to the output's 7 fractional bits and clamp.
    using gemmlowp::RoundingDivideByPOT;
    int32_t rescaled = RoundingDivideByPOT(tanh_in_q0, 31 - kOutputScale);
    rescaled = std::min(std::max(rescaled, kMinInt8), kMaxInt8);
    output_data[i] = static_cast<int8_t>(rescaled);
  }
}
// Int16 tanh computed from the uint16 sigmoid lookup table, via the
// identity tanh(x) = 2*sigmoid(2*x) - 1. The input is first rescaled by
// input_multiplier / input_left_shift (set up by TanhPrepare); linear
// interpolation between adjacent LUT entries provides sub-entry precision.
inline void Tanh(int32_t input_multiplier, int32_t input_left_shift,
                 const RuntimeShape& input_shape, const int16_t* ptr_input_data,
                 const RuntimeShape& output_shape, int16_t* ptr_output_data) {
  // We use the LUT for sigmoid and take into account, that
  // tanh(x) = 2*sigmoid(2*x) - 1
  // We scale by 3/4 to expand range [-8,8]->[-10.7,10.7].
  // In case of general parameter scale, multiplier 3 is taken into account
  // in TanhPrepare function and it is included in
  // input_multiplier already.
  if (input_multiplier == 0) {  // power of two case
    input_multiplier = 3 << input_left_shift;
    input_left_shift = 0;
  }
  // Rounding bias for the arithmetic right shift below (round half up).
  int32_t round = (input_left_shift > 0) ? 1 << (input_left_shift - 1) : 0;
  int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; ++i, ptr_input_data++, ptr_output_data++) {
    // Rescale the raw int16 input into the LUT's fixed-point domain.
    int32_t input_data =
        ((*ptr_input_data) * input_multiplier + round) >> input_left_shift;
    // The table covers non-negative arguments only; the sign is restored in
    // the reconstruction step at the bottom of the loop.
    uint32_t abs_input_data = abs(input_data);
    // High bits select the table entry; the low 8 bits interpolate.
    uint32_t uh = abs_input_data >> 8;
    int32_t result;
    if (uh >= 255) {
      // Saturate to maximum.
      result = 0xFFFF << 8;
    } else {
      // Linear interpolation between the two neighboring table values.
      uint32_t ua = sigmoid_table_uint16[uh];
      uint32_t ub = sigmoid_table_uint16[uh + 1];
      uint8_t ut = abs_input_data & 0xFF;
      result = (ua << 8) + ut * (ub - ua);
    }
    // Map sigmoid to tanh (2*sigmoid - 1), mirroring for negative inputs,
    // with a rounding bias folded in before the final shift.
    result = (input_data >= 0)
                 ? (result - (1 << (14 + 9)) + (1 << (9 - 2)))
                 : (-result + (1 << (14 + 9)) + (1 << (9 - 2)) - 1);
    // Convert back to 16-bit.
    result >>= (9 - 1);
    *ptr_output_data = result;
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TANH_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/tanh.h | C++ | apache-2.0 | 4,289 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_integer_ops {
// Fixed-point per-channel-quantization transpose convolution reference kernel.
//
// Two-pass implementation:
//   1. Scatter pass: each (zero-point-adjusted) input element is multiplied
//      against the filter and accumulated into the int32 `scratch_buffer`
//      at every output location it influences — the transpose of a regular
//      convolution's gather.
//   2. Requantize pass: per output element, add the per-channel bias (if
//      present), rescale with the per-channel output_multiplier /
//      output_shift, add the output zero point, and saturate to the full
//      int8 range (no fused activation narrowing here).
//
// `scratch_buffer` must hold output_shape.FlatSize() int32 values; it is
// zeroed below. The im2col arguments exist only for signature parity with
// the optimized kernels and are ignored.
inline void TransposeConv(
    const ConvParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int8_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const int32_t* bias_data, const RuntimeShape& output_shape,
    int8_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data,
    int32_t* scratch_buffer) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  (void)im2col_data;  // only used in optimized code.
  (void)im2col_shape;  // only used in optimized code.
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    // One bias value per output channel.
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  const int32_t input_offset = params.input_offset;
  const int32_t output_offset = params.output_offset;
  // Clamp only to the representable int8 range (no fused activation).
  const int32_t output_activation_min = std::numeric_limits<int8_t>::min();
  const int32_t output_activation_max = std::numeric_limits<int8_t>::max();
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int num_elements = output_shape.FlatSize();
  // We need to initialize scratch_buffer to all 0s, as we apply the same
  // 'scatter' based trick as in float version.
  memset(scratch_buffer, 0, num_elements * sizeof(int32_t));
  // Loop through input elements one at a time.
  for (int batch = 0; batch < batches; ++batch) {
    for (int in_y = 0; in_y < input_height; ++in_y) {
      for (int in_x = 0; in_x < input_width; ++in_x) {
        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
          // Loop through the output elements it will influence.
          const int out_x_origin = (in_x * stride_width) - pad_width;
          const int out_y_origin = (in_y * stride_height) - pad_height;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              for (int out_channel = 0; out_channel < output_depth;
                   ++out_channel) {
                // Compute output element location.
                const int out_x = out_x_origin + filter_x;
                const int out_y = out_y_origin + filter_y;
                // We cannot accumulate out of bounds.
                if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
                    (out_y < output_height)) {
                  const int8_t input_value = input_data[Offset(
                      input_shape, batch, in_y, in_x, in_channel)];
                  const int8_t filter_value =
                      filter_data[Offset(filter_shape, out_channel, filter_y,
                                         filter_x, in_channel)];
                  // The input zero point is folded in here; the filter is
                  // used raw (per-channel symmetric quantization).
                  scratch_buffer[Offset(output_shape, batch, out_y, out_x,
                                        out_channel)] +=
                      (input_value + input_offset) * filter_value;
                }
              }
            }
          }
        }
      }
    }
  }
  // Second pass: bias, per-channel requantization, and saturation.
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          int32_t acc = scratch_buffer[Offset(output_shape, batch, out_y, out_x,
                                              out_channel)];
          if (bias_data) {
            acc += bias_data[out_channel];
          }
          acc = MultiplyByQuantizedMultiplier(
              acc, output_multiplier[out_channel], output_shift[out_channel]);
          acc += output_offset;
          acc = std::max(acc, output_activation_min);
          acc = std::min(acc, output_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              static_cast<int8_t>(acc);
        }
      }
    }
  }
}
// int16_t input (zero_point=0), int8_t filter, int64 accumulator
//
// Same two-pass scatter/requantize scheme as the int8 overload above, but:
//   - no input offset is applied (the int16 input's zero point is 0),
//   - accumulation and bias use int64 scratch storage,
//   - the result saturates to the full int16 range.
inline void TransposeConv(
    const ConvParams& params, const int32_t* output_multiplier,
    const int32_t* output_shift, const RuntimeShape& input_shape,
    const int16_t* input_data, const RuntimeShape& filter_shape,
    const int8_t* filter_data, const RuntimeShape& bias_shape,
    const std::int64_t* bias_data, const RuntimeShape& output_shape,
    int16_t* output_data, const RuntimeShape& im2col_shape, int8_t* im2col_data,
    std::int64_t* scratch_buffer) {
  const int stride_width = params.stride_width;
  const int stride_height = params.stride_height;
  const int pad_width = params.padding_values.width;
  const int pad_height = params.padding_values.height;
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(filter_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  (void)im2col_data;  // only used in optimized code.
  (void)im2col_shape;  // only used in optimized code.
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int input_depth = MatchingDim(input_shape, 3, filter_shape, 3);
  const int output_depth = MatchingDim(filter_shape, 0, output_shape, 3);
  if (bias_data) {
    // One bias value per output channel.
    TFLITE_DCHECK_EQ(bias_shape.FlatSize(), output_depth);
  }
  const int input_height = input_shape.Dims(1);
  const int input_width = input_shape.Dims(2);
  const int filter_height = filter_shape.Dims(1);
  const int filter_width = filter_shape.Dims(2);
  const int output_height = output_shape.Dims(1);
  const int output_width = output_shape.Dims(2);
  // Clamp only to the representable int16 range (no fused activation).
  const int32_t output_activation_min = std::numeric_limits<int16_t>::min();
  const int32_t output_activation_max = std::numeric_limits<int16_t>::max();
  TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
  const int num_elements = output_shape.FlatSize();
  // We need to initialize scratch_buffer to all 0s, as we apply the same
  // 'scatter' based trick as in float version.
  memset(scratch_buffer, 0, num_elements * sizeof(std::int64_t));
  // Loop through input elements one at a time.
  for (int batch = 0; batch < batches; ++batch) {
    for (int in_y = 0; in_y < input_height; ++in_y) {
      for (int in_x = 0; in_x < input_width; ++in_x) {
        for (int in_channel = 0; in_channel < input_depth; ++in_channel) {
          // Loop through the output elements it will influence.
          const int out_x_origin = (in_x * stride_width) - pad_width;
          const int out_y_origin = (in_y * stride_height) - pad_height;
          for (int filter_y = 0; filter_y < filter_height; ++filter_y) {
            for (int filter_x = 0; filter_x < filter_width; ++filter_x) {
              for (int out_channel = 0; out_channel < output_depth;
                   ++out_channel) {
                // Compute output element location.
                const int out_x = out_x_origin + filter_x;
                const int out_y = out_y_origin + filter_y;
                // We cannot accumulate out of bounds.
                if ((out_x >= 0) && (out_x < output_width) && (out_y >= 0) &&
                    (out_y < output_height)) {
                  const int32_t input_value = input_data[Offset(
                      input_shape, batch, in_y, in_x, in_channel)];
                  const int32_t filter_value =
                      filter_data[Offset(filter_shape, out_channel, filter_y,
                                         filter_x, in_channel)];
                  // No input offset: the int16 input's zero point is 0.
                  scratch_buffer[Offset(output_shape, batch, out_y, out_x,
                                        out_channel)] +=
                      input_value * filter_value;
                }
              }
            }
          }
        }
      }
    }
  }
  // Second pass: bias, per-channel requantization, and saturation.
  for (int batch = 0; batch < batches; ++batch) {
    for (int out_y = 0; out_y < output_height; ++out_y) {
      for (int out_x = 0; out_x < output_width; ++out_x) {
        for (int out_channel = 0; out_channel < output_depth; ++out_channel) {
          std::int64_t acc = scratch_buffer[Offset(output_shape, batch, out_y,
                                                   out_x, out_channel)];
          if (bias_data) {
            acc += bias_data[out_channel];
          }
          int32_t scaled_acc = MultiplyByQuantizedMultiplier(
              acc, output_multiplier[out_channel], output_shift[out_channel]);
          scaled_acc = std::max(scaled_acc, output_activation_min);
          scaled_acc = std::min(scaled_acc, output_activation_max);
          output_data[Offset(output_shape, batch, out_y, out_x, out_channel)] =
              static_cast<int16_t>(scaled_acc);
        }
      }
    }
  }
}
} // namespace reference_integer_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_INTEGER_OPS_TRANSPOSE_CONV_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/integer_ops/transpose_conv.h | C++ | apache-2.0 | 10,511 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_
#include <algorithm>
#include <cmath>
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Float L2 normalization along the innermost dimension.
//
// Each trailing-dim vector is divided by its Euclidean norm; `epsilon`
// provides a lower bound on the norm so all-zero vectors do not divide by
// zero. `op_params` is unused in the float path (kept for signature parity
// with the quantized overload).
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const float* input_data,
                            const RuntimeShape& output_shape,
                            float* output_data, float epsilon = 1e-6) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int row = 0; row < outer_size; ++row) {
    const float* row_in = input_data + depth * row;
    float* row_out = output_data + depth * row;
    // Sum of squares over this trailing-dim vector.
    float sum_of_squares = 0;
    for (int c = 0; c < depth; ++c) {
      sum_of_squares += row_in[c] * row_in[c];
    }
    // Lower-bound the norm by epsilon to guard the division below.
    float norm = std::sqrt(sum_of_squares);
    norm = std::max(norm, epsilon);
    for (int c = 0; c < depth; ++c) {
      row_out[c] = row_in[c] / norm;
    }
  }
}
// Quantized (uint8) L2 normalization along the trailing dimension.
//
// For each inner vector, computes the sum of squared zero-point-centered
// values, derives a fixed-point multiplier/shift for its inverse square root
// (via GetInvSqrtQuantizedMultiplierExp — presumably an inv-sqrt
// approximation; confirm against its definition), then rescales each element.
// The `128 *` scaling and `128 +` offset fix the output representation at
// zero point 128; results are clamped to the full uint8 range [0, 255].
inline void L2Normalization(const tflite::L2NormalizationParams& op_params,
                            const RuntimeShape& input_shape,
                            const uint8_t* input_data,
                            const RuntimeShape& output_shape,
                            uint8_t* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int32_t input_zero_point = op_params.input_zero_point;
  for (int i = 0; i < outer_size; ++i) {
    // Sum of squares of the zero-point-centered inputs for this vector.
    int32_t square_l2_norm = 0;
    for (int c = 0; c < depth; c++) {
      int32_t diff = input_data[depth * i + c] - input_zero_point;
      square_l2_norm += diff * diff;
    }
    int32_t inv_l2norm_multiplier;
    int inv_l2norm_shift;
    GetInvSqrtQuantizedMultiplierExp(square_l2_norm, kReverseShift,
                                     &inv_l2norm_multiplier, &inv_l2norm_shift);
    for (int c = 0; c < depth; c++) {
      int32_t diff = input_data[depth * i + c] - input_zero_point;
      // Scale by 128 before the fixed-point multiply so the result lands in
      // the output quantization (zero point 128).
      int32_t rescaled_diff = MultiplyByQuantizedMultiplierSmallerThanOneExp(
          128 * diff, inv_l2norm_multiplier, inv_l2norm_shift);
      int32_t unclamped_output_val = 128 + rescaled_diff;
      int32_t output_val =
          std::min(static_cast<int32_t>(255),
                   std::max(static_cast<int32_t>(0), unclamped_output_val));
      output_data[depth * i + c] = static_cast<uint8_t>(output_val);
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_L2NORMALIZATION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/l2normalization.h | C++ | apache-2.0 | 3,750 |
/* Copyright 2020 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
#include <algorithm>
#include <limits>
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Float LeakyRelu: passes positive values through unchanged and scales
// non-positive values by params.alpha. Since alpha may be negative or
// greater than 1, std::max would not be a correct shortcut here.
inline void LeakyRelu(const tflite::LeakyReluParams& params,
                      const RuntimeShape& input_shape, const float* input_data,
                      const RuntimeShape& output_shape, float* output_data) {
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int idx = 0; idx < size; ++idx) {
    const float x = input_data[idx];
    if (x > 0) {
      output_data[idx] = x;
    } else {
      output_data[idx] = x * params.alpha;
    }
  }
}
// Quantized LeakyRelu. The zero-point-centered input is rescaled with the
// "identity" multiplier/shift when non-negative and with the "alpha"
// multiplier/shift otherwise, then re-offset and clamped to T's full range.
template <typename T>
inline void QuantizeLeakyRelu(const LeakyReluParams& params,
                              const RuntimeShape& input_shape,
                              const T* input_data,
                              const RuntimeShape& output_shape,
                              T* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  static const int32_t kQuantMin = std::numeric_limits<T>::min();
  static const int32_t kQuantMax = std::numeric_limits<T>::max();
  for (int idx = 0; idx < num_elements; ++idx) {
    // Center on the input zero point before rescaling.
    const int32_t centered = input_data[idx] - params.input_offset;
    const bool non_negative = centered >= 0;
    const int32_t multiplier = non_negative
                                   ? params.output_multiplier_identity
                                   : params.output_multiplier_alpha;
    const int shift = non_negative ? params.output_shift_identity
                                   : params.output_shift_alpha;
    const int32_t rescaled =
        params.output_offset +
        MultiplyByQuantizedMultiplier(centered, multiplier, shift);
    const T clamped =
        std::min(kQuantMax, std::max(kQuantMin, rescaled));
    output_data[idx] = static_cast<T>(clamped);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEAKY_RELU_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/leaky_relu.h | C++ | apache-2.0 | 2,911 |
/* Copyright 2018 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEGACY_REFERENCE_OPS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEGACY_REFERENCE_OPS_H_
#include <stdint.h>
#include <sys/types.h>
#include "public/gemmlowp.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/legacy_types.h"
#include "tensorflow/lite/kernels/internal/reference/conv.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_float.h"
#include "tensorflow/lite/kernels/internal/reference/depthwiseconv_uint8.h"
#include "tensorflow/lite/kernels/internal/reference/reference_ops.h"
#include "tensorflow/lite/kernels/internal/reference/tanh.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Legacy depthwise-conv code used the opposite shift sign convention from the
// new ops (which treat positive shifts as left shifts); legacy shift values
// are negated with this factor before being stored in DepthwiseParams.
static constexpr int kDepthwiseReverseShift = -1;
// Converts a legacy Dims<4> into a RuntimeShape by reversing the order of the
// four `sizes` entries.
inline void ShapeFromDims(const tflite::Dims<4>& dims, RuntimeShape* shape) {
  shape->BuildFrom(
      {dims.sizes[3], dims.sizes[2], dims.sizes[1], dims.sizes[0]});
}
// Legacy float DepthwiseConv taking loose arguments plus Dims<4> shapes.
// Packs the arguments into a DepthwiseParams and forwards to the new-style
// DepthwiseConv overload.
inline void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                          const float* filter_data, const Dims<4>& filter_dims,
                          const float* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height,
                          int dilation_width_factor, int dilation_height_factor,
                          int pad_width, int pad_height, int depth_multiplier,
                          float output_activation_min,
                          float output_activation_max, float* output_data,
                          const Dims<4>& output_dims) {
  tflite::DepthwiseParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.depth_multiplier = depth_multiplier;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  DepthwiseConv(op_params, DimsToShape(input_dims), input_data,
                DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
                bias_data, DimsToShape(output_dims), output_data);
}
// Overload without dilation arguments; forwards with dilation factors of 1.
inline void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                          const float* filter_data, const Dims<4>& filter_dims,
                          const float* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, int depth_multiplier,
                          float output_activation_min,
                          float output_activation_max, float* output_data,
                          const Dims<4>& output_dims) {
  DepthwiseConv(input_data, input_dims, filter_data, filter_dims, bias_data,
                bias_dims, stride_width, stride_height, 1, 1, pad_width,
                pad_height, depth_multiplier, output_activation_min,
                output_activation_max, output_data, output_dims);
}
// Legacy, for compatibility with old checked-in code.
// Derives the activation min/max from the fused-activation template parameter
// and forwards to the runtime-activation overload above.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                   const float* filter_data, const Dims<4>& filter_dims,
                   const float* bias_data, const Dims<4>& bias_dims,
                   int stride_width, int stride_height, int pad_width,
                   int pad_height, int depth_multiplier, float* output_data,
                   const Dims<4>& output_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  DepthwiseConv(input_data, input_dims, filter_data, filter_dims, bias_data,
                bias_dims, stride_width, stride_height, pad_width, pad_height,
                depth_multiplier, output_activation_min, output_activation_max,
                output_data, output_dims);
}
// Legacy, for compatibility with old checked-in code.
// Single-stride variant: applies the same stride in width and height.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const float* input_data, const Dims<4>& input_dims,
                   const float* filter_data, const Dims<4>& filter_dims,
                   const float* bias_data, const Dims<4>& bias_dims, int stride,
                   int pad_width, int pad_height, int depth_multiplier,
                   float* output_data, const Dims<4>& output_dims) {
  DepthwiseConv<Ac>(input_data, input_dims, filter_data, filter_dims, bias_data,
                    bias_dims, stride, stride, pad_width, pad_height,
                    depth_multiplier, output_data, output_dims);
}
// Legacy quantized (uint8) DepthwiseConv. Packs the loose zero-point,
// multiplier and activation arguments into a DepthwiseParams and forwards
// to the new-style DepthwiseConv overload. Note the shift-sign flip via
// kDepthwiseReverseShift below.
inline void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                          int32 input_offset, const uint8* filter_data,
                          const Dims<4>& filter_dims, int32 filter_offset,
                          const int32* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height,
                          int dilation_width_factor, int dilation_height_factor,
                          int pad_width, int pad_height, int depth_multiplier,
                          int32 output_offset, int32 output_multiplier,
                          int output_shift, int32 output_activation_min,
                          int32 output_activation_max, uint8* output_data,
                          const Dims<4>& output_dims) {
  tflite::DepthwiseParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.depth_multiplier = depth_multiplier;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kDepthwiseReverseShift * output_shift;
  DepthwiseConv(op_params, DimsToShape(input_dims), input_data,
                DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
                bias_data, DimsToShape(output_dims), output_data);
}
// Overload without dilation arguments; forwards with dilation factors of 1.
inline void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                          int32 input_offset, const uint8* filter_data,
                          const Dims<4>& filter_dims, int32 filter_offset,
                          const int32* bias_data, const Dims<4>& bias_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, int depth_multiplier,
                          int32 output_offset, int32 output_multiplier,
                          int output_shift, int32 output_activation_min,
                          int32 output_activation_max, uint8* output_data,
                          const Dims<4>& output_dims) {
  DepthwiseConv(input_data, input_dims, input_offset, filter_data, filter_dims,
                filter_offset, bias_data, bias_dims, stride_width,
                stride_height, 1, 1, pad_width, pad_height, depth_multiplier,
                output_offset, output_multiplier, output_shift,
                output_activation_min, output_activation_max, output_data,
                output_dims);
}
// Legacy, for compatibility with old checked-in code.
// With a kNone fused activation the range must already span the full uint8
// domain [0, 255]; this is only sanity-checked in debug builds.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                   int32 input_offset, const uint8* filter_data,
                   const Dims<4>& filter_dims, int32 filter_offset,
                   const int32* bias_data, const Dims<4>& bias_dims,
                   int stride_width, int stride_height, int pad_width,
                   int pad_height, int depth_multiplier, int32 output_offset,
                   int32 output_multiplier, int output_shift,
                   int32 output_activation_min, int32 output_activation_max,
                   uint8* output_data, const Dims<4>& output_dims) {
  if (Ac == FusedActivationFunctionType::kNone) {
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  DepthwiseConv(input_data, input_dims, input_offset, filter_data, filter_dims,
                filter_offset, bias_data, bias_dims, stride_width,
                stride_height, pad_width, pad_height, depth_multiplier,
                output_offset, output_multiplier, output_shift,
                output_activation_min, output_activation_max, output_data,
                output_dims);
}
// Legacy, for compatibility with old checked-in code.
// Single-stride variant: applies the same stride in width and height.
template <FusedActivationFunctionType Ac>
void DepthwiseConv(const uint8* input_data, const Dims<4>& input_dims,
                   int32 input_offset, const uint8* filter_data,
                   const Dims<4>& filter_dims, int32 filter_offset,
                   const int32* bias_data, const Dims<4>& bias_dims, int stride,
                   int pad_width, int pad_height, int depth_multiplier,
                   int32 output_offset, int32 output_multiplier,
                   int output_shift, int32 output_activation_min,
                   int32 output_activation_max, uint8* output_data,
                   const Dims<4>& output_dims) {
  DepthwiseConv<Ac>(input_data, input_dims, input_offset, filter_data,
                    filter_dims, filter_offset, bias_data, bias_dims, stride,
                    stride, pad_width, pad_height, depth_multiplier,
                    output_offset, output_multiplier, output_shift,
                    output_activation_min, output_activation_max, output_data,
                    output_dims);
}
// Legacy float Conv taking loose arguments plus Dims<4> shapes. Packs them
// into a ConvParams and forwards to the new-style Conv, passing the im2col
// buffer through unchanged.
inline void Conv(const float* input_data, const Dims<4>& input_dims,
                 const float* filter_data, const Dims<4>& filter_dims,
                 const float* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int dilation_width_factor,
                 int dilation_height_factor, int pad_width, int pad_height,
                 float output_activation_min, float output_activation_max,
                 float* output_data, const Dims<4>& output_dims,
                 float* im2col_data, const Dims<4>& im2col_dims) {
  tflite::ConvParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  Conv(op_params, DimsToShape(input_dims), input_data, DimsToShape(filter_dims),
       filter_data, DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
       output_data, DimsToShape(im2col_dims), im2col_data);
}
// Derives activation min/max from the fused-activation template parameter
// and forwards to the runtime-activation overload above.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride_width,
          int stride_height, int dilation_width_factor,
          int dilation_height_factor, int pad_width, int pad_height,
          float* output_data, const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  Conv(input_data, input_dims, filter_data, filter_dims, bias_data, bias_dims,
       stride_width, stride_height, dilation_width_factor,
       dilation_height_factor, pad_width, pad_height, output_activation_min,
       output_activation_max, output_data, output_dims, im2col_data,
       im2col_dims);
}
// legacy, for compatibility with old checked-in code
// Variant without dilation arguments; forwards with dilation factors of 1.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride_width,
          int stride_height, int pad_width, int pad_height, float* output_data,
          const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  Conv(input_data, input_dims, filter_data, filter_dims, bias_data, bias_dims,
       stride_width, stride_height, 1, 1, pad_width, pad_height,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims);
}
// legacy, for compatibility with old checked-in code
// Single-stride variant: same stride in width and height, dilation of 1.
template <FusedActivationFunctionType Ac>
void Conv(const float* input_data, const Dims<4>& input_dims,
          const float* filter_data, const Dims<4>& filter_dims,
          const float* bias_data, const Dims<4>& bias_dims, int stride,
          int pad_width, int pad_height, float* output_data,
          const Dims<4>& output_dims, float* im2col_data,
          const Dims<4>& im2col_dims) {
  Conv<Ac>(input_data, input_dims, filter_data, filter_dims, bias_data,
           bias_dims, stride, stride, 1, 1, pad_width, pad_height, output_data,
           output_dims, im2col_data, im2col_dims);
}
// Legacy quantized (uint8) Conv. Packs the loose zero-point, multiplier and
// activation arguments into a ConvParams (flipping the legacy shift sign via
// kReverseShift) and forwards, with im2col buffer and gemmlowp context,
// to the new-style Conv.
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_offset, const uint8* filter_data,
                 const Dims<4>& filter_dims, int32 filter_offset,
                 const int32* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int dilation_width_factor,
                 int dilation_height_factor, int pad_width, int pad_height,
                 int32 output_offset, int32 output_multiplier, int output_shift,
                 int32 output_activation_min, int32 output_activation_max,
                 uint8* output_data, const Dims<4>& output_dims,
                 uint8* im2col_data, const Dims<4>& im2col_dims,
                 gemmlowp::GemmContext* gemmlowp_context) {
  tflite::ConvParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  op_params.dilation_width_factor = dilation_width_factor;
  op_params.dilation_height_factor = dilation_height_factor;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kReverseShift * output_shift;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  Conv(op_params, DimsToShape(input_dims), input_data, DimsToShape(filter_dims),
       filter_data, DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
       output_data, DimsToShape(im2col_dims), im2col_data, gemmlowp_context);
}
// Overload without dilation arguments; forwards with dilation factors of 1.
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_offset, const uint8* filter_data,
                 const Dims<4>& filter_dims, int32 filter_offset,
                 const int32* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int32 output_offset, int32 output_multiplier,
                 int output_shift, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims, uint8* im2col_data,
                 const Dims<4>& im2col_dims,
                 gemmlowp::GemmContext* gemmlowp_context) {
  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,
       filter_offset, bias_data, bias_dims, stride_width, stride_height, 1, 1,
       pad_width, pad_height, output_offset, output_multiplier, output_shift,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Statically restricts Ac to the supported fused activations; with kNone the
// activation range must already span the full uint8 domain (debug-checked).
template <FusedActivationFunctionType Ac>
inline void Conv(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_offset, const uint8* filter_data,
                 const Dims<4>& filter_dims, int32 filter_offset,
                 const int32* bias_data, const Dims<4>& bias_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int32 output_offset, int32 output_multiplier,
                 int output_shift, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims, uint8* im2col_data,
                 const Dims<4>& im2col_dims,
                 gemmlowp::GemmContext* gemmlowp_context) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  Conv(input_data, input_dims, input_offset, filter_data, filter_dims,
       filter_offset, bias_data, bias_dims, stride_width, stride_height,
       pad_width, pad_height, output_offset, output_multiplier, output_shift,
       output_activation_min, output_activation_max, output_data, output_dims,
       im2col_data, im2col_dims, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Single-stride variant: applies the same stride in width and height.
template <FusedActivationFunctionType Ac>
void Conv(const uint8* input_data, const Dims<4>& input_dims,
          int32 input_offset, const uint8* filter_data,
          const Dims<4>& filter_dims, int32 filter_offset,
          const int32* bias_data, const Dims<4>& bias_dims, int stride,
          int pad_width, int pad_height, int32 output_offset,
          int32 output_multiplier, int output_shift,
          int32 output_activation_min, int32 output_activation_max,
          uint8* output_data, const Dims<4>& output_dims, uint8* im2col_data,
          const Dims<4>& im2col_dims, gemmlowp::GemmContext* gemmlowp_context) {
  Conv<Ac>(input_data, input_dims, input_offset, filter_data, filter_dims,
           filter_offset, bias_data, bias_dims, stride, stride, pad_width,
           pad_height, output_offset, output_multiplier, output_shift,
           output_activation_min, output_activation_max, output_data,
           output_dims, im2col_data, im2col_dims, gemmlowp_context);
}
// Legacy float TransposeConv with Dims<4> shapes. Packs stride/padding into a
// ConvParams and forwards to the new-style TransposeConv with no bias
// (empty bias shape, null bias pointer).
inline void TransposeConv(const float* input_data, const Dims<4>& input_dims,
                          const float* filter_data, const Dims<4>& filter_dims,
                          int stride_width, int stride_height, int pad_width,
                          int pad_height, float* output_data,
                          const Dims<4>& output_dims, float* im2col_data,
                          const Dims<4>& im2col_dims) {
  tflite::ConvParams op_params;
  // Padding type is ignored, but still set.
  op_params.padding_type = PaddingType::kSame;
  op_params.padding_values.width = pad_width;
  op_params.padding_values.height = pad_height;
  op_params.stride_width = stride_width;
  op_params.stride_height = stride_height;
  TransposeConv(op_params, DimsToShape(input_dims), input_data,
                DimsToShape(filter_dims), filter_data,
                /*bias_shape*/ RuntimeShape(), /*bias*/ nullptr,
                DimsToShape(output_dims), output_data, DimsToShape(im2col_dims),
                im2col_data);
}
// Bias-less overload taking new-style shapes; forwards with an empty bias.
inline void TransposeConv(
    const ConvParams& params, const RuntimeShape& input_shape,
    const float* input_data, const RuntimeShape& filter_shape,
    const float* filter_data, const RuntimeShape& output_shape,
    float* output_data, const RuntimeShape& im2col_shape, float* im2col_data) {
  TransposeConv(params, input_shape, input_data, filter_shape, filter_data,
                /*bias_shape*/ RuntimeShape(), /*bias*/ nullptr, output_shape,
                output_data, im2col_shape, im2col_data);
}
// Legacy float FullyConnected with Dims<4> shapes. Packs the activation
// bounds into a FullyConnectedParams and forwards to the new-style overload.
inline void FullyConnected(const float* input_data, const Dims<4>& input_dims,
                           const float* weights_data,
                           const Dims<4>& weights_dims, const float* bias_data,
                           const Dims<4>& bias_dims,
                           float output_activation_min,
                           float output_activation_max, float* output_data,
                           const Dims<4>& output_dims) {
  tflite::FullyConnectedParams op_params;
  op_params.float_activation_min = output_activation_min;
  op_params.float_activation_max = output_activation_max;
  FullyConnected(op_params, DimsToShape(input_dims), input_data,
                 DimsToShape(weights_dims), weights_data,
                 DimsToShape(bias_dims), bias_data, DimsToShape(output_dims),
                 output_data);
}
// legacy, for compatibility with old checked-in code
// Derives the activation min/max from the fused-activation template
// parameter and forwards to the runtime-activation overload above.
template <FusedActivationFunctionType Ac>
void FullyConnected(const float* input_data, const Dims<4>& input_dims,
                    const float* weights_data, const Dims<4>& weights_dims,
                    const float* bias_data, const Dims<4>& bias_dims,
                    float* output_data, const Dims<4>& output_dims) {
  float output_activation_min, output_activation_max;
  GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
  FullyConnected(input_data, input_dims, weights_data, weights_dims, bias_data,
                 bias_dims, output_activation_min, output_activation_max,
                 output_data, output_dims);
}
// Compatibility overload: accepts (and discards) a gemmlowp context so old
// call sites still compile; the reference implementation does not use it.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    uint8* output_data, gemmlowp::GemmContext*) {
  FullyConnected(params, input_shape, input_data, filter_shape, filter_data,
                 bias_shape, bias_data, output_shape, output_data);
}
// Same compatibility shim for the int16 output variant.
inline void FullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& filter_shape,
    const uint8* filter_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int16* output_data, gemmlowp::GemmContext*) {
  FullyConnected(params, input_shape, input_data, filter_shape, filter_data,
                 bias_shape, bias_data, output_shape, output_data);
}
// Legacy quantized FullyConnected (uint8 output). Packs the loose zero-point,
// multiplier and activation arguments into a FullyConnectedParams (flipping
// the legacy shift sign via kReverseShift) and forwards.
inline void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,
                           int32 input_offset, const uint8* filter_data,
                           const Dims<4>& filter_dims, int32 filter_offset,
                           const int32* bias_data, const Dims<4>& bias_dims,
                           int32 output_offset, int32 output_multiplier,
                           int output_shift, int32 output_activation_min,
                           int32 output_activation_max, uint8* output_data,
                           const Dims<4>& output_dims,
                           gemmlowp::GemmContext* gemmlowp_context) {
  tflite::FullyConnectedParams op_params;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kReverseShift * output_shift;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  FullyConnected(op_params, DimsToShape(input_dims), input_data,
                 DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
                 bias_data, DimsToShape(output_dims), output_data,
                 gemmlowp_context);
}
// Same legacy shim for the int16 output variant.
inline void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,
                           int32 input_offset, const uint8* filter_data,
                           const Dims<4>& filter_dims, int32 filter_offset,
                           const int32* bias_data, const Dims<4>& bias_dims,
                           int32 output_offset, int32 output_multiplier,
                           int output_shift, int32 output_activation_min,
                           int32 output_activation_max, int16* output_data,
                           const Dims<4>& output_dims,
                           gemmlowp::GemmContext* gemmlowp_context) {
  tflite::FullyConnectedParams op_params;
  op_params.input_offset = input_offset;
  op_params.weights_offset = filter_offset;
  op_params.output_offset = output_offset;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kReverseShift * output_shift;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  FullyConnected(op_params, DimsToShape(input_dims), input_data,
                 DimsToShape(filter_dims), filter_data, DimsToShape(bias_dims),
                 bias_data, DimsToShape(output_dims), output_data,
                 gemmlowp_context);
}
// Compatibility overload: accepts (and discards) a gemmlowp context so old
// call sites still compile; the reference implementation does not use it.
inline void ShuffledFullyConnected(
    const FullyConnectedParams& params, const RuntimeShape& input_shape,
    const uint8* input_data, const RuntimeShape& weights_shape,
    const uint8* shuffled_weights_data, const RuntimeShape& bias_shape,
    const int32* bias_data, const RuntimeShape& output_shape,
    int16* output_data, uint8* shuffled_input_workspace_data,
    gemmlowp::GemmContext*) {
  ShuffledFullyConnected(params, input_shape, input_data, weights_shape,
                         shuffled_weights_data, bias_shape, bias_data,
                         output_shape, output_data,
                         shuffled_input_workspace_data);
}
// Legacy shim with Dims<4> shapes and loose quantization arguments; packs
// them into a FullyConnectedParams (flipping the legacy shift sign) and
// forwards.
inline void ShuffledFullyConnected(
    const uint8* input_data, const Dims<4>& input_dims,
    const uint8* shuffled_weights_data, const Dims<4>& weights_dims,
    const int32* bias_data, const Dims<4>& bias_dims, int32 output_multiplier,
    int output_shift, int32 output_activation_min, int32 output_activation_max,
    int16* output_data, const Dims<4>& output_dims,
    uint8* shuffled_input_workspace_data,
    gemmlowp::GemmContext* gemmlowp_context) {
  tflite::FullyConnectedParams op_params;
  op_params.output_multiplier = output_multiplier;
  // Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
  op_params.output_shift = kReverseShift * output_shift;
  op_params.quantized_activation_min = output_activation_min;
  op_params.quantized_activation_max = output_activation_max;
  ShuffledFullyConnected(op_params, DimsToShape(input_dims), input_data,
                         DimsToShape(weights_dims), shuffled_weights_data,
                         DimsToShape(bias_dims), bias_data,
                         DimsToShape(output_dims), output_data,
                         shuffled_input_workspace_data, gemmlowp_context);
}
// legacy, for compatibility with old checked-in code
// Statically restricts Ac to the supported fused activations; with kNone the
// activation range must already span the full uint8 domain (debug-checked).
template <FusedActivationFunctionType Ac>
void FullyConnected(const uint8* input_data, const Dims<4>& input_dims,
                    int32 input_offset, const uint8* filter_data,
                    const Dims<4>& filter_dims, int32 filter_offset,
                    const int32* bias_data, const Dims<4>& bias_dims,
                    int32 output_offset, int32 output_multiplier,
                    int output_shift, int32 output_activation_min,
                    int32 output_activation_max, uint8* output_data,
                    const Dims<4>& output_dims,
                    gemmlowp::GemmContext* gemmlowp_context) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  FullyConnected(input_data, input_dims, input_offset, filter_data, filter_dims,
                 filter_offset, bias_data, bias_dims, output_offset,
                 output_multiplier, output_shift, output_activation_min,
                 output_activation_max, output_data, output_dims,
                 gemmlowp_context);
}
// Legacy float LstmCell with Dims<4> shapes. Converts every Dims<4> to a
// RuntimeShape and forwards to the new-style LstmCell.
inline void LstmCell(const float* input_data, const Dims<4>& input_dims,
                     const float* prev_activ_data,
                     const Dims<4>& prev_activ_dims, const float* weights_data,
                     const Dims<4>& weights_dims, const float* bias_data,
                     const Dims<4>& bias_dims, const float* prev_state_data,
                     const Dims<4>& prev_state_dims, float* output_state_data,
                     const Dims<4>& output_state_dims, float* output_activ_data,
                     const Dims<4>& output_activ_dims, float* concat_temp_data,
                     const Dims<4>& concat_temp_dims, float* activ_temp_data,
                     const Dims<4>& activ_temp_dims) {
  tflite::LstmCellParams op_params;
  // Float LSTM cell does not need parameters to be set: leave untouched.
  LstmCell(op_params, DimsToShape(input_dims), input_data,
           DimsToShape(prev_activ_dims), prev_activ_data,
           DimsToShape(weights_dims), weights_data, DimsToShape(bias_dims),
           bias_data, DimsToShape(prev_state_dims), prev_state_data,
           DimsToShape(output_state_dims), output_state_data,
           DimsToShape(output_activ_dims), output_activ_data,
           DimsToShape(concat_temp_dims), concat_temp_data,
           DimsToShape(activ_temp_dims), activ_temp_data);
}
// Legacy quantized LstmCell shim. Packs the weights zero point and the
// accumulator multiplier/shift into an LstmCellParams and forwards to the
// new-style templated LstmCell (StateIntegerBits selects the fixed-point
// format of the state tensor).
template <int StateIntegerBits>
void LstmCell(const uint8* input_data_uint8, const Dims<4>& input_dims,
              const uint8* prev_activ_data_uint8,
              const Dims<4>& prev_activ_dims, const uint8* weights_data_uint8,
              const Dims<4>& weights_dims, const int32* bias_data_int32,
              const Dims<4>& bias_dims, const int16* prev_state_data_int16,
              const Dims<4>& prev_state_dims, int16* output_state_data_int16,
              const Dims<4>& output_state_dims, uint8* output_activ_data_uint8,
              const Dims<4>& output_activ_dims, uint8* concat_temp_data_uint8,
              const Dims<4>& concat_temp_dims, int16* activ_temp_data_int16,
              const Dims<4>& activ_temp_dims, int32 weights_zero_point,
              int32 accum_multiplier, int accum_shift,
              gemmlowp::GemmContext* gemmlowp_context) {
  tflite::LstmCellParams op_params;
  op_params.weights_zero_point = weights_zero_point;
  op_params.accum_multiplier = accum_multiplier;
  op_params.accum_shift = accum_shift;
  LstmCell<StateIntegerBits>(
      op_params, DimsToShape(input_dims), input_data_uint8,
      DimsToShape(prev_activ_dims), prev_activ_data_uint8,
      DimsToShape(weights_dims), weights_data_uint8, DimsToShape(bias_dims),
      bias_data_int32, DimsToShape(prev_state_dims), prev_state_data_int16,
      DimsToShape(output_state_dims), output_state_data_int16,
      DimsToShape(output_activ_dims), output_activ_data_uint8,
      DimsToShape(concat_temp_dims), concat_temp_data_uint8,
      DimsToShape(activ_temp_dims), activ_temp_data_int16, gemmlowp_context);
}
template <typename T>
void BroadcastDiv(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
BroadcastDivSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void Div(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
Div(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac, typename Scalar>
inline void Concatenation(int concat_dim, const Scalar* const* input_data,
const Dims<4>* const* input_dims, int inputs_count,
Scalar* output_data, const Dims<4>& output_dims) {
// For now we don't have a model with a Concatenation with fused activation.
TFLITE_DCHECK_EQ(Ac, FusedActivationFunctionType::kNone);
std::vector<RuntimeShape> input_shapes(inputs_count);
std::vector<const RuntimeShape*> input_shapes_indirect(inputs_count);
for (int i = 0; i < inputs_count; ++i) {
ShapeFromDims(*input_dims[i], &input_shapes[i]);
input_shapes_indirect[i] = &input_shapes[i];
}
tflite::ConcatenationParams op_params;
op_params.axis = 3 - concat_dim;
op_params.inputs_count = inputs_count;
Concatenation(op_params, input_shapes_indirect.data(), input_data,
DimsToShape(output_dims), output_data);
}
inline void Concatenation(int concat_dim, const uint8* const* input_data,
const Dims<4>* const* input_dims,
const int32* input_zeropoint,
const float* input_scale, int inputs_count,
uint8* output_data, const Dims<4>& output_dims,
const int32 output_zeropoint,
const float output_scale) {
std::vector<RuntimeShape> input_shapes(inputs_count);
std::vector<const RuntimeShape*> input_shapes_indirect(inputs_count);
for (int i = 0; i < inputs_count; ++i) {
ShapeFromDims(*input_dims[i], &input_shapes[i]);
input_shapes_indirect[i] = &input_shapes[i];
}
tflite::ConcatenationParams op_params;
op_params.axis = 3 - concat_dim;
op_params.input_zeropoint = input_zeropoint;
op_params.input_scale = input_scale;
op_params.inputs_count = inputs_count;
op_params.output_zeropoint = output_zeropoint;
op_params.output_scale = output_scale;
ConcatenationWithScaling(op_params, input_shapes_indirect.data(), input_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac, typename Scalar>
void DepthConcatenation(const Scalar* const* input_data,
const Dims<4>* const* input_dims, int inputs_count,
Scalar* output_data, const Dims<4>& output_dims) {
// For now we don't have a model with a Concatenation with fused activation.
TFLITE_DCHECK_EQ(Ac, FusedActivationFunctionType::kNone);
std::vector<RuntimeShape> input_shapes(inputs_count);
std::vector<const RuntimeShape*> input_shapes_indirect(inputs_count);
for (int i = 0; i < inputs_count; ++i) {
ShapeFromDims(*input_dims[i], &input_shapes[i]);
input_shapes_indirect[i] = &input_shapes[i];
}
tflite::ConcatenationParams op_params;
op_params.inputs_count = inputs_count;
DepthConcatenation(op_params, input_shapes_indirect.data(), input_data,
DimsToShape(output_dims), output_data);
}
template <typename Scalar>
void TensorFlowSplit(const Scalar* input_data, const Dims<4>& input_dims,
int axis, int outputs_count, Scalar* const* output_data,
const Dims<4>* const* output_dims) {
std::vector<RuntimeShape> output_shapes(outputs_count);
std::vector<const RuntimeShape*> output_shapes_indirect(outputs_count);
for (int i = 0; i < outputs_count; ++i) {
ShapeFromDims(*output_dims[i], &output_shapes[i]);
output_shapes_indirect[i] = &output_shapes[i];
}
tflite::SplitParams op_params;
op_params.axis = 3 - axis;
op_params.num_split = outputs_count;
Split(op_params, DimsToShape(input_dims), input_data,
output_shapes_indirect.data(), output_data);
}
// Legacy split along the depth axis (Dims axis 0). Verifies that every
// output matches the input on the batch/height/width axes, then forwards to
// the axis-taking overload.
template <FusedActivationFunctionType Ac, typename Scalar>
void TensorFlowSplit(const Scalar* input_data, const Dims<4>& input_dims,
                     int outputs_count, Scalar* const* output_data,
                     const Dims<4>* const* output_dims) {
  TFLITE_DCHECK_GE(outputs_count, 1);
  for (int k = 0; k < outputs_count; k++) {
    /* batches = */ MatchingArraySize(*output_dims[k], 3, input_dims, 3);
    /* height = */ MatchingArraySize(*output_dims[k], 2, input_dims, 2);
    /* width = */ MatchingArraySize(*output_dims[k], 1, input_dims, 1);
  }
  // No model uses Split with a fused activation, so only kNone is supported.
  TFLITE_DCHECK_EQ(Ac, FusedActivationFunctionType::kNone);
  TensorFlowSplit(input_data, input_dims, /*axis=*/0, outputs_count,
                  output_data, output_dims);
}
inline void Softmax(const float* input_data, const RuntimeShape& input_shape,
float beta, float* output_data,
const RuntimeShape& output_shape) {
SoftmaxParams params;
params.beta = beta;
Softmax(params, input_shape, input_data, output_shape, output_data);
}
inline void Softmax(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_beta_multiplier, int32 input_beta_left_shift,
int diff_min, uint8* output_data,
const RuntimeShape& output_shape) {
SoftmaxParams params;
params.input_multiplier = input_beta_multiplier;
params.input_left_shift = input_beta_left_shift;
params.diff_min = diff_min;
Softmax(params, input_shape, input_data, output_shape, output_data);
}
inline void LogSoftmax(const float* input_data, const RuntimeShape& input_shape,
float* output_data, const RuntimeShape& output_shape) {
SoftmaxParams params;
// No params currently used for float LogSoftmax.
LogSoftmax(params, input_shape, input_data, output_shape, output_data);
}
inline void LogSoftmax(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_multiplier, int32 input_left_shift,
int32 reverse_scaling_divisor,
int32 reverse_scaling_right_shift, int diff_min,
uint8* output_data, const RuntimeShape& output_shape) {
SoftmaxParams params;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
params.reverse_scaling_divisor = reverse_scaling_divisor;
params.reverse_scaling_right_shift = reverse_scaling_right_shift;
params.diff_min = diff_min;
LogSoftmax(params, input_shape, input_data, output_shape, output_data);
}
// Quantized (uint8) logistic/sigmoid over flat buffers.
// Inputs within `input_range_radius` of the zero point are rescaled into
// fixed point and run through gemmlowp's logistic; inputs outside that
// window are saturated directly to 0 or 255.
inline void Logistic(const LogisticParams& params,
                     const RuntimeShape& input_shape, const uint8* input_data,
                     const RuntimeShape& output_shape, uint8* output_data) {
  const int32 input_zero_point = params.input_zero_point;
  const int32 input_range_radius = params.input_range_radius;
  const int32 input_multiplier = params.input_multiplier;
  const int input_left_shift = params.input_left_shift;
  // Input and output must describe the same number of elements.
  const int flat_size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < flat_size; i++) {
    const uint8 input_val_u8 = input_data[i];
    // Center the raw value on the quantization zero point.
    const int32 input_val_centered =
        static_cast<int32>(input_val_u8) - input_zero_point;
    uint8 output_val;
    if (input_val_centered <= -input_range_radius) {
      // Far negative input: sigmoid saturates to 0.
      output_val = 0;
    } else if (input_val_centered >= input_range_radius) {
      // Far positive input: sigmoid saturates to 1.0, i.e. 255 in uint8.
      output_val = 255;
    } else {
      // Rescale into the fixed-point domain expected by gemmlowp::logistic.
      const int32 input_val_rescaled =
          MultiplyByQuantizedMultiplierGreaterThanOne(
              input_val_centered, input_multiplier, input_left_shift);
      using FixedPoint4 = gemmlowp::FixedPoint<int32, 4>;
      using FixedPoint0 = gemmlowp::FixedPoint<int32, 0>;
      const FixedPoint4 input_val_f4 = FixedPoint4::FromRaw(input_val_rescaled);
      const FixedPoint0 output_val_f0 = gemmlowp::logistic(input_val_f4);
      // Convert from Q0.31 to Q23.8.
      using gemmlowp::RoundingDivideByPOT;
      int32 output_val_s32 = RoundingDivideByPOT(output_val_f0.raw(), 23);
      // Rounding can overshoot to 256 when the logistic is ~1.0; clamp to
      // the uint8 maximum.
      if (output_val_s32 == 256) {
        output_val_s32 = 255;
      }
      // Reinterpret as U0.8.
      TFLITE_DCHECK_GE(output_val_s32, 0);
      TFLITE_DCHECK_LE(output_val_s32, 255);
      output_val = static_cast<uint8>(output_val_s32);
    }
    output_data[i] = output_val;
  }
}
inline void Logistic(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const RuntimeShape& output_shape) {
LogisticParams params;
params.input_zero_point = input_zero_point;
params.input_range_radius = input_range_radius;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
Logistic(params, input_shape, input_data, output_shape, output_data);
}
inline void Logistic(const RuntimeShape& input_shape, const int16* input_data,
const RuntimeShape& output_shape, int16* output_data) {
LogisticParams params;
// No params currently needed by int16 Logistic.
Logistic(params, input_shape, input_data, output_shape, output_data);
}
inline void Tanh(const uint8* input_data, const RuntimeShape& input_shape,
int32 input_zero_point, int32 input_range_radius,
int32 input_multiplier, int input_left_shift,
uint8* output_data, const RuntimeShape& output_shape) {
TanhParams params;
params.input_zero_point = input_zero_point;
params.input_range_radius = input_range_radius;
params.input_multiplier = input_multiplier;
params.input_left_shift = input_left_shift;
Tanh(params, input_shape, input_data, output_shape, output_data);
}
inline void Tanh(const int16* input_data, const RuntimeShape& input_shape,
int input_left_shift, int16* output_data,
const RuntimeShape& output_shape) {
TanhParams params;
params.input_left_shift = input_left_shift;
Tanh(params, input_shape, input_data, output_shape, output_data);
}
inline void Dequantize(const uint8* input_data, const Dims<4>& input_dims,
int32 zero_point, double scale, float* output_data,
const Dims<4>& output_dims) {
tflite::DequantizationParams op_params;
op_params.zero_point = zero_point;
op_params.scale = scale;
Dequantize(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
inline void FakeQuant(const float* input_data, const Dims<4>& input_dims,
float rmin, float rmax, int num_bits, float* output_data,
const Dims<4>& output_dims) {
tflite::FakeQuantParams op_params;
op_params.num_bits = num_bits;
op_params.minmax.min = rmin;
op_params.minmax.max = rmax;
FakeQuant(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void Gather(const T* input_data, const Dims<4>& input_dims,
int input_rank, const int32* coords_data,
const Dims<4>& coords_dims, T* output_data,
const Dims<4>& output_dims) {
tflite::GatherParams op_params;
op_params.axis = 4 - input_rank;
op_params.batch_dims = 0;
Gather(op_params, DimsToShape(input_dims), input_data,
DimsToShape(coords_dims), coords_data, DimsToShape(output_dims),
output_data);
}
// Reverses the order of all 32 bits of `n` (bit 0 becomes bit 31, etc.):
// first mirrors the bits within each byte via three swap passes, then
// mirrors the byte order.
inline uint32 LegacyReverseBits32(uint32 n) {
  n = ((n & 0x55555555) << 1) | ((n >> 1) & 0x55555555);  // swap odd/even bits
  n = ((n & 0x33333333) << 2) | ((n >> 2) & 0x33333333);  // swap bit pairs
  n = ((n & 0x0F0F0F0F) << 4) | ((n >> 4) & 0x0F0F0F0F);  // swap nibbles
  // Byte swap: move byte 0 to byte 3, byte 1 to byte 2, and vice versa.
  return (n << 24) | ((n & 0xFF00) << 8) | ((n >> 8) & 0xFF00) | (n >> 24);
}
// Converts a StridedSliceParams built for the legacy reversed (Dims<4>)
// axis order into the RuntimeShape order, in place: the per-axis index and
// stride arrays are reversed, and each per-axis bitmask is bit-reversed and
// shifted down so only the low `start_indices_count` bits remain.
inline void StridedSliceReverseIndices(tflite::StridedSliceParams* p) {
  // All three per-axis arrays must agree on the axis count.
  TFLITE_CHECK_EQ(p->start_indices_count, p->stop_indices_count);
  TFLITE_CHECK_EQ(p->stop_indices_count, p->strides_count);
  std::reverse(p->start_indices, p->start_indices + p->start_indices_count);
  std::reverse(p->stop_indices, p->stop_indices + p->stop_indices_count);
  std::reverse(p->strides, p->strides + p->strides_count);
  // The masks hold one flag bit per axis, so reversing the axes means
  // reversing the bits. LegacyReverseBits32 leaves the reversed bits in the
  // high end of the word; the shift realigns them to bit 0.
  p->begin_mask = LegacyReverseBits32(static_cast<uint32>(p->begin_mask)) >>
                  (32 - p->start_indices_count);
  p->ellipsis_mask =
      LegacyReverseBits32(static_cast<uint32>(p->ellipsis_mask)) >>
      (32 - p->start_indices_count);
  p->end_mask = LegacyReverseBits32(static_cast<uint32>(p->end_mask)) >>
                (32 - p->start_indices_count);
  p->new_axis_mask =
      LegacyReverseBits32(static_cast<uint32>(p->new_axis_mask)) >>
      (32 - p->start_indices_count);
  p->shrink_axis_mask =
      LegacyReverseBits32(static_cast<uint32>(p->shrink_axis_mask)) >>
      (32 - p->start_indices_count);
}
// Legacy Dims<4> StridedSlice entry point. Builds StridedSliceParams from
// the legacy arguments, then reverses all indices and masks to convert from
// the Dims ordering to the RuntimeShape ordering before forwarding.
template <typename T>
inline void StridedSlice(const T* input_data, const Dims<4>& input_dims,
                         int begin_mask, int end_mask, int shrink_axis_mask,
                         const std::vector<int>& start_indices,
                         const std::vector<int>& stop_indices,
                         const std::vector<int>& strides, T* output_data,
                         const Dims<4>& output_dims) {
  // Legacy callers always describe exactly four dimensions.
  TFLITE_DCHECK_EQ(start_indices.size(), 4);
  auto params = strided_slice::BuildStridedSliceParams(
      begin_mask, end_mask, shrink_axis_mask, start_indices, stop_indices,
      strides);
  StridedSliceReverseIndices(&params);
  StridedSlice(params, DimsToShape(input_dims), input_data,
               DimsToShape(output_dims), output_data);
}
template <typename T>
inline void Mean(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& reduction_indices, T* output_data,
const Dims<4>& output_dims) {
tflite::MeanParams op_params;
op_params.axis_count = reduction_indices.size();
for (int i = 0; i < op_params.axis_count; ++i) {
op_params.axis[i] = reduction_indices[op_params.axis_count - 1 - i];
}
Mean(op_params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void Transpose(const T* input, const Dims<4>& input_dims, T* output,
const Dims<4>& output_dims, const int* permuted_axes) {
TransposeParams params;
params.perm_count = 4;
for (int i = 0; i < 4; ++i) {
params.perm[i] = 3 - permuted_axes[3 - i];
}
Transpose(params, DimsToShape(input_dims), input, DimsToShape(output_dims),
output);
}
template <typename T, ComparisonFn<T> F>
inline void Comparison(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
bool* output_data, const Dims<4>& output_dims) {
ComparisonParams op_params;
// No parameters needed.
ComparisonImpl<T, F>(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <typename T, ComparisonFn<int32> F>
inline void Comparison(int left_shift, const T* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const T* input2_data, const Dims<4>& input2_dims,
int32 input2_offset, int32 input2_multiplier,
int input2_shift, bool* output_data,
const Dims<4>& output_dims) {
tflite::ComparisonParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.input2_shift = kReverseShift * input2_shift;
ComparisonWithScaling<T, F>(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <typename T, ComparisonFn<T> F>
inline void BroadcastComparison(const T* input1_data,
const Dims<4>& input1_dims,
const T* input2_data,
const Dims<4>& input2_dims, bool* output_data,
const Dims<4>& output_dims) {
ComparisonParams op_params;
// No parameters needed.
BroadcastComparison4DSlowImpl<T, F>(op_params, DimsToShape(input1_dims),
input1_data, DimsToShape(input2_dims),
input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T, ComparisonFn<int32> F>
inline void BroadcastComparison(int left_shift, const T* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const T* input2_data,
const Dims<4>& input2_dims, int32 input2_offset,
int32 input2_multiplier, int input2_shift,
bool* output_data, const Dims<4>& output_dims) {
ComparisonParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
// Legacy ops used mixed left and right shifts. Now all are +ve-means-left.
op_params.input2_shift = kReverseShift * input2_shift;
BroadcastComparison4DSlowWithScaling<T, F>(
op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
// Stamps out the four legacy entry points for one comparison op `name`:
// plain elementwise, quantized elementwise, broadcasting, and quantized
// broadcasting — each delegating to the Comparison/BroadcastComparison
// helpers above. (Line continuations prevent comments inside the macro.)
#define TFLITE_LEGACY_COMPARISON_OP(name)                                     \
  template <typename T>                                                       \
  inline void name(const T* input1_data, const Dims<4>& input1_dims,          \
                   const T* input2_data, const Dims<4>& input2_dims,          \
                   bool* output_data, const Dims<4>& output_dims) {           \
    ruy::profiler::ScopeLabel label(#name);                                   \
    Comparison<T, name##Fn>(input1_data, input1_dims, input2_data,            \
                            input2_dims, output_data, output_dims);           \
  }                                                                           \
  template <typename T>                                                       \
  inline void name(                                                           \
      int left_shift, const T* input1_data, const Dims<4>& input1_dims,       \
      int32 input1_offset, int32 input1_multiplier, int input1_shift,         \
      const T* input2_data, const Dims<4>& input2_dims, int32 input2_offset,  \
      int32 input2_multiplier, int input2_shift, bool* output_data,           \
      const Dims<4>& output_dims) {                                           \
    ruy::profiler::ScopeLabel label(#name "/8bit");                           \
    Comparison<T, name##Fn>(left_shift, input1_data, input1_dims,             \
                            input1_offset, input1_multiplier, input1_shift,   \
                            input2_data, input2_dims, input2_offset,          \
                            input2_multiplier, input2_shift, output_data,     \
                            output_dims);                                     \
  }                                                                           \
  template <typename T>                                                       \
  inline void Broadcast##name(                                                \
      const T* input1_data, const Dims<4>& input1_dims, const T* input2_data, \
      const Dims<4>& input2_dims, bool* output_data,                          \
      const Dims<4>& output_dims) {                                           \
    ruy::profiler::ScopeLabel label("Broadcast" #name);                       \
    BroadcastComparison<T, name##Fn>(input1_data, input1_dims, input2_data,   \
                                     input2_dims, output_data, output_dims);  \
  }                                                                           \
  template <typename T>                                                       \
  inline void Broadcast##name(                                                \
      int left_shift, const T* input1_data, const Dims<4>& input1_dims,       \
      int32 input1_offset, int32 input1_multiplier, int input1_shift,         \
      const T* input2_data, const Dims<4>& input2_dims, int32 input2_offset,  \
      int32 input2_multiplier, int input2_shift, bool* output_data,           \
      const Dims<4>& output_dims) {                                           \
    ruy::profiler::ScopeLabel label("Broadcast" #name "/8bit");               \
    BroadcastComparison<T, name##Fn>(left_shift, input1_data, input1_dims,    \
                                     input1_offset, input1_multiplier,        \
                                     input1_shift, input2_data, input2_dims,  \
                                     input2_offset, input2_multiplier,        \
                                     input2_shift, output_data, output_dims); \
  }
// Instantiate the legacy wrappers for every supported comparison.
TFLITE_LEGACY_COMPARISON_OP(Equal);
TFLITE_LEGACY_COMPARISON_OP(NotEqual);
TFLITE_LEGACY_COMPARISON_OP(Greater);
TFLITE_LEGACY_COMPARISON_OP(GreaterEqual);
TFLITE_LEGACY_COMPARISON_OP(Less);
TFLITE_LEGACY_COMPARISON_OP(LessEqual);
#undef TFLITE_LEGACY_COMPARISON_OP
template <typename D, typename T>
inline void Select(const D* input_condition_data,
const Dims<4>& input_condition_dims, const T* input_x_data,
const Dims<4>& input_x_dims, const T* input_y_data,
const Dims<4>& input_y_dims, T* output_data,
const Dims<4>& output_dims) {
Select(DimsToShape(input_condition_dims), input_condition_data,
DimsToShape(input_x_dims), input_x_data, DimsToShape(input_y_dims),
input_y_data, DimsToShape(output_dims), output_data);
}
template <typename D, typename T>
inline void RankOneSelect(const D* input_condition_data,
const Dims<4>& input_condition_dims,
const T* input_x_data, const Dims<4>& input_x_dims,
const T* input_y_data, const Dims<4>& input_y_dims,
T* output_data, const Dims<4>& output_dims) {
RankOneSelect(DimsToShape(input_condition_dims), input_condition_data,
DimsToShape(input_x_dims), input_x_data,
DimsToShape(input_y_dims), input_y_data,
DimsToShape(output_dims), output_data);
}
template <typename T, typename TI>
inline void SparseToDense(const std::vector<std::vector<TI>>& indices,
const T* values, T default_value, T* output_data,
const Dims<4>& output_dims, bool value_is_scalar) {
SparseToDense(indices, values, default_value, value_is_scalar,
DimsToShape(output_dims), output_data);
}
template <typename Scalar>
void Pack(int dim, const Scalar* const* input_data,
const Dims<4>* const* input_dims, int inputs_count,
Scalar* output_data, const Dims<4>& output_dims) {
std::vector<RuntimeShape> input_shapes(inputs_count);
std::vector<const RuntimeShape*> input_shapes_indirect(inputs_count);
for (int i = 0; i < inputs_count; ++i) {
ShapeFromDims(*input_dims[i], &input_shapes[i]);
input_shapes_indirect[i] = &input_shapes[i];
}
tflite::PackParams op_params;
op_params.axis = 3 - dim;
op_params.inputs_count = inputs_count;
Pack(op_params, input_shapes_indirect.data(), input_data,
DimsToShape(output_dims), output_data);
}
template <typename Scalar>
void Unpack(int axis, const Scalar* input_data, const Dims<4>& input_dims,
int dimensions, int outputs_count, Scalar* const* output_datas,
const Dims<4>& output_dims) {
tflite::UnpackParams op_params;
op_params.axis = 3 - axis;
op_params.num_split = outputs_count;
Unpack(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_datas);
}
template <typename Scalar>
void Pack(int dim, const Scalar* const* input_data,
const Dims<4>* const* input_dims, const int32* input_zeropoint,
const float* input_scale, int inputs_count, Scalar* output_data,
const Dims<4>& output_dims, const int32 output_zeropoint,
const float output_scale) {
std::vector<RuntimeShape> input_shapes(inputs_count);
std::vector<const RuntimeShape*> input_shapes_indirect(inputs_count);
for (int i = 0; i < inputs_count; ++i) {
ShapeFromDims(*input_dims[i], &input_shapes[i]);
input_shapes_indirect[i] = &input_shapes[i];
}
tflite::PackParams op_params;
op_params.axis = 3 - dim;
op_params.input_zeropoint = input_zeropoint;
op_params.input_scale = input_scale;
op_params.inputs_count = inputs_count;
op_params.output_zeropoint = output_zeropoint;
op_params.output_scale = output_scale;
PackWithScaling(op_params, input_shapes_indirect.data(), input_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
void L2Normalization(const float* input_data, const RuntimeShape& input_shape,
float* output_data, const RuntimeShape& output_shape) {
static_assert(Ac == FusedActivationFunctionType::kNone, "");
tflite::L2NormalizationParams op_params;
// No params need to be set for float.
L2Normalization(op_params, input_shape, input_data, output_shape,
output_data);
}
inline void L2Normalization(const uint8* input_data,
const RuntimeShape& input_shape,
int32 input_zero_point, uint8* output_data,
const RuntimeShape& output_shape) {
tflite::L2NormalizationParams op_params;
op_params.input_zero_point = input_zero_point;
L2Normalization(op_params, input_shape, input_data, output_shape,
output_data);
}
template <FusedActivationFunctionType Ac>
void L2Normalization(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
L2Normalization<Ac>(input_data, DimsToShape(input_dims), output_data,
DimsToShape(output_dims));
}
inline void L2Normalization(const uint8* input_data, const Dims<4>& input_dims,
int32 input_zero_point, uint8* output_data,
const Dims<4>& output_dims) {
L2Normalization(input_data, DimsToShape(input_dims), input_zero_point,
output_data, DimsToShape(output_dims));
}
inline void Relu(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Relu(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
inline void Relu1(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Relu1(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
inline void Relu6(const float* input_data, const Dims<4>& input_dims,
float* output_data, const Dims<4>& output_dims) {
Relu6(DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// Clamps each uint8 value to [min_value, max_value] via the params-based
// ReluX kernel.
inline void ReluX(uint8 min_value, uint8 max_value, const uint8* input_data,
                  const RuntimeShape& input_shape, uint8* output_data,
                  const RuntimeShape& output_shape) {
  tflite::ActivationParams activation_params;
  activation_params.quantized_activation_min = min_value;
  activation_params.quantized_activation_max = max_value;
  ReluX(activation_params, input_shape, input_data, output_shape, output_data);
}
template <FusedActivationFunctionType Ac>
inline void Add(int left_shift, const uint8* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const uint8* input2_data, const Dims<4>& input2_dims,
int32 input2_offset, int32 input2_multiplier, int input2_shift,
int32 output_offset, int32 output_multiplier, int output_shift,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac>
void Add(const int32* input1_data, const Dims<4>& input1_dims,
const int32* input2_data, const Dims<4>& input2_dims,
int32* output_data, const Dims<4>& output_dims) {
ruy::profiler::ScopeLabel label("Add/int32");
TFLITE_DCHECK(Ac == FusedActivationFunctionType::kNone);
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = std::numeric_limits<int32>::min();
op_params.quantized_activation_max = std::numeric_limits<int32>::max();
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <FusedActivationFunctionType Ac>
inline void BroadcastAdd(int left_shift, const uint8* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift,
const uint8* input2_data, const Dims<4>& input2_dims,
int32 input2_offset, int32 input2_multiplier,
int input2_shift, int32 output_offset,
int32 output_multiplier, int output_shift,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
void Add(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
BroadcastAdd4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
template <FusedActivationFunctionType Ac>
inline void BroadcastAddFivefold(
int y0, int y1, int y2, int y3, int y4, int left_shift,
const uint8* input1_data, const Dims<4>& input1_dims, int32 input1_offset,
int32 input1_multiplier, int input1_shift, const uint8* input2_data,
const Dims<4>& input2_dims, int32 input2_offset, int32 input2_multiplier,
int input2_shift, int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
constexpr int kReverseShift = -1;
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, 0);
TFLITE_DCHECK_EQ(output_activation_max, 255);
}
tflite::ArithmeticParams op_params;
op_params.broadcast_category =
tflite::BroadcastableOpCategory::kFirstInputBroadcastsFast;
op_params.left_shift = left_shift;
op_params.input1_offset = input1_offset;
op_params.input1_multiplier = input1_multiplier;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_offset = input2_offset;
op_params.input2_multiplier = input2_multiplier;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = kReverseShift * output_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
op_params.broadcast_shape[4] = y0;
op_params.broadcast_shape[3] = y1;
op_params.broadcast_shape[2] = y2;
op_params.broadcast_shape[1] = y3;
op_params.broadcast_shape[0] = y4;
BroadcastAddFivefold(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
// Resolves the fused activation's bounds at compile time, then delegates to
// the explicit-bounds BroadcastAdd.
template <FusedActivationFunctionType Ac, typename T>
void BroadcastAdd(const T* input1_data, const Dims<4>& input1_dims,
                  const T* input2_data, const Dims<4>& input2_dims,
                  T* output_data, const Dims<4>& output_dims) {
  T activation_min, activation_max;
  GetActivationMinMax(Ac, &activation_min, &activation_max);
  BroadcastAdd(input1_data, input1_dims, input2_data, input2_dims,
               activation_min, activation_max, output_data, output_dims);
}
template <FusedActivationFunctionType Ac>
inline void Add(const int16* input1_data, const Dims<4>& input1_dims,
int input1_shift, const int16* input2_data,
const Dims<4>& input2_dims, int input2_shift,
int16 output_activation_min, int16 output_activation_max,
int16* output_data, const Dims<4>& output_dims) {
static_assert(Ac == FusedActivationFunctionType::kNone ||
Ac == FusedActivationFunctionType::kRelu ||
Ac == FusedActivationFunctionType::kRelu6 ||
Ac == FusedActivationFunctionType::kRelu1,
"");
TFLITE_DCHECK_LE(output_activation_min, output_activation_max);
if (Ac == FusedActivationFunctionType::kNone) {
TFLITE_DCHECK_EQ(output_activation_min, -32768);
TFLITE_DCHECK_EQ(output_activation_max, 32767);
}
tflite::ArithmeticParams op_params;
op_params.input1_shift = kReverseShift * input1_shift;
op_params.input2_shift = kReverseShift * input2_shift;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
Add(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void Sub(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(FusedActivationFunctionType::kNone,
&output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
op_params.float_activation_min = output_activation_min;
op_params.float_activation_max = output_activation_max;
Sub(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void Sub(const T* input1_data, const Dims<4>& input1_dims, const T* input2_data,
const Dims<4>& input2_dims, T* output_data,
const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = std::numeric_limits<T>::min();
op_params.quantized_activation_max = std::numeric_limits<T>::max();
Sub(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void AveragePool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min,
float output_activation_max, float* output_data,
const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = kheight;
params.filter_width = kwidth;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
// Transitional version that will be moved shortly to legacy_reference_ops, as
// part of RuntimeShape revisions.
inline void BroadcastMul4DSlow(const uint8* input1_data,
const Dims<4>& input1_dims, int32 input1_offset,
const uint8* input2_data,
const Dims<4>& input2_dims, int32 input2_offset,
int32 output_offset, int32 output_multiplier,
int output_shift, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
op_params.input1_offset = input1_offset;
op_params.input2_offset = input2_offset;
op_params.output_offset = output_offset;
op_params.output_multiplier = output_multiplier;
op_params.output_shift = output_shift;
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// Legacy uint8 BroadcastMul: the old API used the reversed shift convention,
// so the output shift is negated (kReverseShift) before forwarding.
inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
                         int32 input1_offset, const uint8* input2_data,
                         const Dims<4>& input2_dims, int32 input2_offset,
                         int32 output_offset, int32 output_multiplier,
                         int output_shift, int32 output_activation_min,
                         int32 output_activation_max, uint8* output_data,
                         const Dims<4>& output_dims) {
  const int reversed_output_shift = kReverseShift * output_shift;
  BroadcastMul4DSlow(input1_data, input1_dims, input1_offset, input2_data,
                     input2_dims, input2_offset, output_offset,
                     output_multiplier, reversed_output_shift,
                     output_activation_min, output_activation_max, output_data,
                     output_dims);
}
// legacy, for compatibility with old checked-in code
// Note: the fused-activation template parameter Ac is not used here; the
// activation range is taken solely from the explicit min/max arguments.
template <FusedActivationFunctionType Ac>
inline void BroadcastMul(const uint8* input1_data, const Dims<4>& input1_dims,
                         int32 input1_offset, const uint8* input2_data,
                         const Dims<4>& input2_dims, int32 input2_offset,
                         int32 output_offset, int32 output_multiplier,
                         int output_shift, int32 output_activation_min,
                         int32 output_activation_max, uint8* output_data,
                         const Dims<4>& output_dims) {
  BroadcastMul(input1_data, input1_dims, input1_offset, input2_data,
               input2_dims, input2_offset, output_offset, output_multiplier,
               output_shift, output_activation_min, output_activation_max,
               output_data, output_dims);
}
// Legacy templated float AveragePool: resolves the fused activation into an
// explicit clamp range and forwards.
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int kwidth, int kheight, float* output_data,
                 const Dims<4>& output_dims) {
  float act_min;
  float act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
              pad_height, kwidth, kheight, act_min, act_max, output_data,
              output_dims);
}
// legacy, for compatibility with old checked-in code
// Convenience overload: a single `stride` is applied to both width and
// height.
template <FusedActivationFunctionType Ac>
void AveragePool(const float* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, float* output_data,
                 const Dims<4>& output_dims) {
  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
                  filter_width, filter_height, output_data, output_dims);
}
inline void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = filter_height;
params.filter_width = filter_width;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.quantized_activation_min = output_activation_min;
params.quantized_activation_max = output_activation_max;
AveragePool(params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
// Validates the fused-activation template parameter at compile time, then
// forwards to the non-templated uint8 overload.
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims,
                 int stride_width, int stride_height, int pad_width,
                 int pad_height, int filter_width, int filter_height,
                 int32 output_activation_min, int32 output_activation_max,
                 uint8* output_data, const Dims<4>& output_dims) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    // No fused activation: the clamp must cover the full uint8 range.
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  AveragePool(input_data, input_dims, stride_width, stride_height, pad_width,
              pad_height, filter_width, filter_height, output_activation_min,
              output_activation_max, output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
// Convenience overload: a single `stride` is applied to both width and
// height.
template <FusedActivationFunctionType Ac>
void AveragePool(const uint8* input_data, const Dims<4>& input_dims, int stride,
                 int pad_width, int pad_height, int filter_width,
                 int filter_height, int32 output_activation_min,
                 int32 output_activation_max, uint8* output_data,
                 const Dims<4>& output_dims) {
  AveragePool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
                  filter_width, filter_height, output_activation_min,
                  output_activation_max, output_data, output_dims);
}
inline void MaxPool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int kwidth, int kheight,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
tflite::PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = kheight;
params.filter_width = kwidth;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// Legacy templated float MaxPool: resolves the fused activation into an
// explicit clamp range and forwards.
template <FusedActivationFunctionType Ac>
void MaxPool(const float* input_data, const Dims<4>& input_dims,
             int stride_width, int stride_height, int pad_width, int pad_height,
             int kwidth, int kheight, float* output_data,
             const Dims<4>& output_dims) {
  float act_min;
  float act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  MaxPool(input_data, input_dims, stride_width, stride_height, pad_width,
          pad_height, kwidth, kheight, act_min, act_max, output_data,
          output_dims);
}
// legacy, for compatibility with old checked-in code
// Convenience overload: a single `stride` is applied to both width and
// height.
template <FusedActivationFunctionType Ac>
void MaxPool(const float* input_data, const Dims<4>& input_dims, int stride,
             int pad_width, int pad_height, int filter_width, int filter_height,
             float* output_data, const Dims<4>& output_dims) {
  MaxPool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
              filter_width, filter_height, output_data, output_dims);
}
inline void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
int32 output_activation_min, int32 output_activation_max,
uint8* output_data, const Dims<4>& output_dims) {
PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = filter_height;
params.filter_width = filter_width;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.quantized_activation_min = output_activation_min;
params.quantized_activation_max = output_activation_max;
MaxPool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// legacy, for compatibility with old checked-in code
// Validates the fused-activation template parameter at compile time, then
// forwards to the non-templated uint8 overload.
template <FusedActivationFunctionType Ac>
void MaxPool(const uint8* input_data, const Dims<4>& input_dims,
             int stride_width, int stride_height, int pad_width, int pad_height,
             int filter_width, int filter_height, int32 output_activation_min,
             int32 output_activation_max, uint8* output_data,
             const Dims<4>& output_dims) {
  static_assert(Ac == FusedActivationFunctionType::kNone ||
                    Ac == FusedActivationFunctionType::kRelu ||
                    Ac == FusedActivationFunctionType::kRelu6 ||
                    Ac == FusedActivationFunctionType::kRelu1,
                "");
  if (Ac == FusedActivationFunctionType::kNone) {
    // No fused activation: the clamp must cover the full uint8 range.
    TFLITE_DCHECK_EQ(output_activation_min, 0);
    TFLITE_DCHECK_EQ(output_activation_max, 255);
  }
  MaxPool(input_data, input_dims, stride_width, stride_height, pad_width,
          pad_height, filter_width, filter_height, output_activation_min,
          output_activation_max, output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
// Convenience overload: a single `stride` is applied to both width and
// height.
template <FusedActivationFunctionType Ac>
void MaxPool(const uint8* input_data, const Dims<4>& input_dims, int stride,
             int pad_width, int pad_height, int filter_width, int filter_height,
             int32 output_activation_min, int32 output_activation_max,
             uint8* output_data, const Dims<4>& output_dims) {
  MaxPool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
              filter_width, filter_height, output_activation_min,
              output_activation_max, output_data, output_dims);
}
inline void L2Pool(const float* input_data, const Dims<4>& input_dims,
int stride_width, int stride_height, int pad_width,
int pad_height, int filter_width, int filter_height,
float output_activation_min, float output_activation_max,
float* output_data, const Dims<4>& output_dims) {
PoolParams params;
params.stride_height = stride_height;
params.stride_width = stride_width;
params.filter_height = filter_height;
params.filter_width = filter_width;
params.padding_values.height = pad_height;
params.padding_values.width = pad_width;
params.float_activation_min = output_activation_min;
params.float_activation_max = output_activation_max;
L2Pool(params, DimsToShape(input_dims), input_data, DimsToShape(output_dims),
output_data);
}
// Legacy templated float L2Pool: resolves the fused activation into an
// explicit clamp range and forwards.
template <FusedActivationFunctionType Ac>
void L2Pool(const float* input_data, const Dims<4>& input_dims,
            int stride_width, int stride_height, int pad_width, int pad_height,
            int filter_width, int filter_height, float* output_data,
            const Dims<4>& output_dims) {
  float act_min;
  float act_max;
  GetActivationMinMax(Ac, &act_min, &act_max);
  L2Pool(input_data, input_dims, stride_width, stride_height, pad_width,
         pad_height, filter_width, filter_height, act_min, act_max,
         output_data, output_dims);
}
// legacy, for compatibility with old checked-in code
// Convenience overload: a single `stride` is applied to both width and
// height.
template <FusedActivationFunctionType Ac>
void L2Pool(const float* input_data, const Dims<4>& input_dims, int stride,
            int pad_width, int pad_height, int filter_width, int filter_height,
            float* output_data, const Dims<4>& output_dims) {
  L2Pool<Ac>(input_data, input_dims, stride, stride, pad_width, pad_height,
             filter_width, filter_height, output_data, output_dims);
}
// Legacy Dims<4>-based float Softmax wrapper.
inline void Softmax(const float* input_data, const Dims<4>& input_dims,
                    float beta, float* output_data,
                    const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Softmax(input_data, in_shape, beta, output_data, out_shape);
}
// Legacy Dims<4>-based quantized Softmax wrapper.
inline void Softmax(const uint8* input_data, const Dims<4>& input_dims,
                    int32 input_beta_multiplier, int32 input_beta_left_shift,
                    int diff_min, uint8* output_data,
                    const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Softmax(input_data, in_shape, input_beta_multiplier, input_beta_left_shift,
          diff_min, output_data, out_shape);
}
// Legacy Dims<4>-based float LogSoftmax wrapper.
inline void LogSoftmax(const float* input_data, const Dims<4>& input_dims,
                       float* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  LogSoftmax(input_data, in_shape, output_data, out_shape);
}
// Legacy Dims<4>-based quantized LogSoftmax wrapper.
inline void LogSoftmax(const uint8* input_data, const Dims<4>& input_dims,
                       int32 input_multiplier, int32 input_left_shift,
                       int32 reverse_scaling_divisor,
                       int32 reverse_scaling_right_shift, int diff_min,
                       uint8* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  LogSoftmax(input_data, in_shape, input_multiplier, input_left_shift,
             reverse_scaling_divisor, reverse_scaling_right_shift, diff_min,
             output_data, out_shape);
}
// Legacy Dims<4>-based float Logistic wrapper.
inline void Logistic(const float* input_data, const Dims<4>& input_dims,
                     float* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Logistic(in_shape, input_data, out_shape, output_data);
}
// Legacy Dims<4>-based quantized Logistic wrapper.
inline void Logistic(const uint8* input_data, const Dims<4>& input_dims,
                     int32 input_zero_point, int32 input_range_radius,
                     int32 input_multiplier, int input_left_shift,
                     uint8* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Logistic(input_data, in_shape, input_zero_point, input_range_radius,
           input_multiplier, input_left_shift, output_data, out_shape);
}
// Legacy Dims<4>-based int16 Logistic wrapper.
inline void Logistic(const int16* input_data, const Dims<4>& input_dims,
                     int16* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Logistic(in_shape, input_data, out_shape, output_data);
}
// Legacy Dims<4>-based float Tanh wrapper.
inline void Tanh(const float* input_data, const Dims<4>& input_dims,
                 float* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Tanh(in_shape, input_data, out_shape, output_data);
}
// Legacy Dims<4>-based quantized Tanh wrapper.
inline void Tanh(const uint8* input_data, const Dims<4>& input_dims,
                 int32 input_zero_point, int32 input_range_radius,
                 int32 input_multiplier, int input_left_shift,
                 uint8* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Tanh(input_data, in_shape, input_zero_point, input_range_radius,
       input_multiplier, input_left_shift, output_data, out_shape);
}
// Legacy Dims<4>-based int16 Tanh wrapper.
inline void Tanh(const int16* input_data, const Dims<4>& input_dims,
                 int input_left_shift, int16* output_data,
                 const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Tanh(input_data, in_shape, input_left_shift, output_data, out_shape);
}
template <typename T>
inline void DepthToSpace(const T* input_data, const Dims<4>& input_dims,
int block_size, T* output_data,
const Dims<4>& output_dims) {
tflite::DepthToSpaceParams op_params;
op_params.block_size = block_size;
DepthToSpace(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void SpaceToDepth(const T* input_data, const Dims<4>& input_dims,
int block_size, T* output_data,
const Dims<4>& output_dims) {
tflite::SpaceToDepthParams op_params;
op_params.block_size = block_size;
SpaceToDepth(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void Mul(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac>
void Mul(const float* input1_data, const Dims<4>& input1_dims,
const float* input2_data, const Dims<4>& input2_dims,
float* output_data, const Dims<4>& output_dims) {
float output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
template <typename T>
void BroadcastMul(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T output_activation_min, T output_activation_max,
T* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
template <FusedActivationFunctionType Ac, typename T>
void BroadcastMul(const T* input1_data, const Dims<4>& input1_dims,
const T* input2_data, const Dims<4>& input2_dims,
T* output_data, const Dims<4>& output_dims) {
T output_activation_min, output_activation_max;
GetActivationMinMax(Ac, &output_activation_min, &output_activation_max);
tflite::ArithmeticParams op_params;
SetActivationParams(output_activation_min, output_activation_max, &op_params);
BroadcastMul4DSlow(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data,
DimsToShape(output_dims), output_data);
}
inline void Mul(const int16* input1_data, const Dims<4>& input1_dims,
const int16* input2_data, const Dims<4>& input2_dims,
int16* output_data, const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
// No params in this version.
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void Mul(const int16* input1_data, const Dims<4>& input1_dims,
const int16* input2_data, const Dims<4>& input2_dims,
int32 output_offset, int32 output_activation_min,
int32 output_activation_max, uint8* output_data,
const Dims<4>& output_dims) {
tflite::ArithmeticParams op_params;
op_params.quantized_activation_min = output_activation_min;
op_params.quantized_activation_max = output_activation_max;
op_params.output_offset = output_offset;
Mul(op_params, DimsToShape(input1_dims), input1_data,
DimsToShape(input2_dims), input2_data, DimsToShape(output_dims),
output_data);
}
inline void LocalResponseNormalization(const float* input_data,
const Dims<4>& input_dims, int range,
float bias, float alpha, float beta,
float* output_data,
const Dims<4>& output_dims) {
tflite::LocalResponseNormalizationParams op_params;
op_params.range = range;
op_params.bias = bias;
op_params.alpha = alpha;
op_params.beta = beta;
LocalResponseNormalization(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
// Legacy Dims<4>-based element-wise Cast wrapper.
template <typename SrcT, typename DstT>
void Cast(const SrcT* input_data, const Dims<4>& input_dims, DstT* output_data,
          const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Cast(in_shape, input_data, out_shape, output_data);
}
// Legacy Dims<4>-based Floor wrapper.
inline void Floor(const float* input_data, const Dims<4>& input_dims,
                  float* output_data, const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  Floor(in_shape, input_data, out_shape, output_data);
}
template <typename T>
inline void ResizeBilinear(const T* input_data, const Dims<4>& input_dims,
const int32* output_size_data,
const Dims<4>& output_size_dims, T* output_data,
const Dims<4>& output_dims, bool align_corners) {
tflite::ResizeBilinearParams op_params;
op_params.align_corners = align_corners;
op_params.half_pixel_centers = false;
ResizeBilinear(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_size_dims), output_size_data,
DimsToShape(output_dims), output_data);
}
// legacy, for compatibility with old checked-in code
// float overload; the old API always resized with align_corners disabled.
inline void ResizeBilinear(const float* input_data, const Dims<4>& input_dims,
                           const int32* output_size_data,
                           const Dims<4>& output_size_dims, float* output_data,
                           const Dims<4>& output_dims) {
  ResizeBilinear<float>(input_data, input_dims, output_size_data,
                        output_size_dims, output_data, output_dims,
                        /*align_corners=*/false);
}
// uint8 overload; the old API always resized with align_corners disabled.
inline void ResizeBilinear(const uint8* input_data, const Dims<4>& input_dims,
                           const int32* output_size_data,
                           const Dims<4>& output_size_dims, uint8* output_data,
                           const Dims<4>& output_dims) {
  ResizeBilinear<uint8>(input_data, input_dims, output_size_data,
                        output_size_dims, output_data, output_dims,
                        /*align_corners=*/false);
}
template <typename T>
inline void SpaceToBatchND(const T* input_data, const Dims<4>& input_dims,
const int32* block_shape_data,
const Dims<4>& block_shape_dims,
const int32* paddings_data,
const Dims<4>& paddings_dims, T* output_data,
const Dims<4>& output_dims,
const int32_t pad_value) {
tflite::SpaceToBatchParams op_params;
op_params.output_offset = pad_value;
SpaceToBatchND(op_params, DimsToShape(input_dims), input_data,
DimsToShape(block_shape_dims), block_shape_data,
DimsToShape(paddings_dims), paddings_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void SpaceToBatchND(const T* input_data, const Dims<4>& input_dims,
const int32* block_shape_data,
const Dims<4>& block_shape_dims,
const int32* paddings_data,
const Dims<4>& paddings_dims, T* output_data,
const Dims<4>& output_dims) {
tflite::SpaceToBatchParams op_params;
op_params.output_offset = 0;
SpaceToBatchND(op_params, DimsToShape(input_dims), input_data,
DimsToShape(block_shape_dims), block_shape_data,
DimsToShape(paddings_dims), paddings_data,
DimsToShape(output_dims), output_data);
}
template <typename T>
inline void BatchToSpaceND(const T* input_data, const Dims<4>& input_dims,
const int32* block_shape_data,
const Dims<4>& block_shape_dims,
const int32* crops_data, const Dims<4>& crops_dims,
T* output_data, const Dims<4>& output_dims) {
BatchToSpaceND(DimsToShape(input_dims), input_data,
DimsToShape(block_shape_dims), block_shape_data,
DimsToShape(crops_dims), crops_data, DimsToShape(output_dims),
output_data);
}
// Legacy signature, function covered both Pad and PadV2.
template <typename T>
inline void PadV2(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& left_paddings,
const std::vector<int>& right_paddings, T* output_data,
const Dims<4>& output_dims, const T pad_value) {
TFLITE_DCHECK_EQ(left_paddings.size(), 4);
TFLITE_DCHECK_EQ(right_paddings.size(), 4);
tflite::PadParams op_params;
op_params.left_padding_count = 4;
op_params.right_padding_count = 4;
for (int i = 0; i < 4; ++i) {
op_params.left_padding[i] = left_paddings[3 - i];
op_params.right_padding[i] = right_paddings[3 - i];
}
// SetFloatOrInt(pad_value, &op_params.pad_value);
const T pad_value_copy = pad_value;
Pad(op_params, DimsToShape(input_dims), input_data, &pad_value_copy,
DimsToShape(output_dims), output_data);
}
// Old Pad that calls legacy PadV2, narrowing the int32 pad value to T.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims, const int32_t pad_value) {
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, static_cast<T>(pad_value));
}
// Old Pad that only padded with 0.
template <typename T>
inline void Pad(const T* input_data, const Dims<4>& input_dims,
                const std::vector<int>& left_paddings,
                const std::vector<int>& right_paddings, T* output_data,
                const Dims<4>& output_dims) {
  PadV2<T>(input_data, input_dims, left_paddings, right_paddings, output_data,
           output_dims, static_cast<T>(0));
}
// Legacy element-vs-scalar Minimum wrapper (input2 is a single value).
template <typename T>
void TensorFlowMinimum(const T* input1_data, const Dims<4>& input1_dims,
                       const T* input2_data, T* output_data,
                       const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input1_dims);
  const auto out_shape = DimsToShape(output_dims);
  Minimum(in_shape, input1_data, input2_data, out_shape, output_data);
}
// Legacy element-vs-scalar Maximum wrapper (input2 is a single value).
template <typename T>
void TensorFlowMaximum(const T* input1_data, const Dims<4>& input1_dims,
                       const T* input2_data, T* output_data,
                       const Dims<4>& output_dims) {
  const auto in_shape = DimsToShape(input1_dims);
  const auto out_shape = DimsToShape(output_dims);
  Maximum(in_shape, input1_data, input2_data, out_shape, output_data);
}
// Legacy broadcasting max/min wrapper; `op` selects the comparison.
template <typename T, typename Op>
void TensorFlowMaximumMinimum(const T* input1_data, const Dims<4>& input1_dims,
                              const T* input2_data, const Dims<4>& input2_dims,
                              T* output_data, const Dims<4>& output_dims,
                              Op op) {
  const auto shape1 = DimsToShape(input1_dims);
  const auto shape2 = DimsToShape(input2_dims);
  const auto out_shape = DimsToShape(output_dims);
  MaximumMinimumBroadcastSlow(shape1, input1_data, shape2, input2_data,
                              out_shape, output_data, op);
}
// Legacy ArgMax over the last axis of a 4-D input; the output is interpreted
// as 3-D (the legacy contract assumed the reduced dimension has size 1).
template <typename T1, typename T2, typename T3>
void ArgMax(const T3* axis, const T1* input_data,
            const tflite::Dims<4>& input_dims, T2* output_data,
            const tflite::Dims<4>& output_dims) {
  // The legacy path only ever supported reducing over the innermost axis.
  TFLITE_DCHECK_EQ(axis[0], 3);
  // Drop the reduced dimension to form a 3-D output shape.
  const RuntimeShape legacy_out_shape(
      {output_dims.sizes[2], output_dims.sizes[1], output_dims.sizes[0]});
  // Sanity check: the 3-D view must cover exactly the same element count,
  // i.e. the dropped dimension is expected to be 1.
  TFLITE_DCHECK_EQ(legacy_out_shape.FlatSize(),
                   DimsToShape(output_dims).FlatSize());
  ArgMinMax(DimsToShape(input_dims), input_data, axis, legacy_out_shape,
            output_data, std::greater<T1>());
}
// Legacy Dims<4>-based ArgMinMax wrapper; `cmp` selects argmin vs argmax.
template <typename T1, typename T2, typename T3, typename Cmp>
void ArgMinMax(const T3* axis, const T1* input_data, const Dims<4>& input_dims,
               T2* output_data, const Dims<4>& output_dims, const Cmp& cmp) {
  const auto in_shape = DimsToShape(input_dims);
  const auto out_shape = DimsToShape(output_dims);
  ArgMinMax(axis, in_shape, input_data, out_shape, output_data, cmp);
}
// Legacy Dims<4>-based element-wise Pow wrapper.
template <typename T>
inline void Pow(const T* input1_data, const Dims<4>& input1_dims,
                const T* input2_data, const Dims<4>& input2_dims,
                T* output_data, const Dims<4>& output_dims) {
  const auto shape1 = DimsToShape(input1_dims);
  const auto shape2 = DimsToShape(input2_dims);
  const auto out_shape = DimsToShape(output_dims);
  Pow(shape1, input1_data, shape2, input2_data, out_shape, output_data);
}
// Legacy Dims<4>-based broadcasting Pow wrapper.
template <typename T>
inline void BroadcastPow(const T* input1_data, const Dims<4>& input1_dims,
                         const T* input2_data, const Dims<4>& input2_dims,
                         T* output_data, const Dims<4>& output_dims) {
  const auto shape1 = DimsToShape(input1_dims);
  const auto shape2 = DimsToShape(input2_dims);
  const auto out_shape = DimsToShape(output_dims);
  BroadcastPow4DSlow(shape1, input1_data, shape2, input2_data, out_shape,
                     output_data);
}
// R: Result type. T1: Input 1 type. T2: Input 2 type.
// Legacy wrapper: applies `func` element-wise with broadcasting.
template <typename R, typename T1, typename T2>
inline void BroadcastBinaryFunction(const T1* input1_data,
                                    const Dims<4>& input1_dims,
                                    const T2* input2_data,
                                    const Dims<4>& input2_dims, R* output_data,
                                    const Dims<4>& output_dims,
                                    R (*func)(T1, T2)) {
  const auto shape1 = DimsToShape(input1_dims);
  const auto shape2 = DimsToShape(input2_dims);
  const auto out_shape = DimsToShape(output_dims);
  BroadcastBinaryFunction(shape1, input1_data, shape2, input2_data, out_shape,
                          output_data, func);
}
// R: Result type. T1: Input 1 type. T2: Input 2 type.
// Legacy wrapper: applies `func` element-wise without broadcasting.
template <typename R, typename T1, typename T2>
inline void BinaryFunction(const T1* input1_data, const Dims<4>& input1_dims,
                           const T2* input2_data, const Dims<4>& input2_dims,
                           R* output_data, const Dims<4>& output_dims,
                           R (*func)(T1, T2)) {
  const auto shape1 = DimsToShape(input1_dims);
  const auto shape2 = DimsToShape(input2_dims);
  const auto out_shape = DimsToShape(output_dims);
  BinaryFunction(shape1, input1_data, shape2, input2_data, out_shape,
                 output_data, func);
}
template <typename T>
inline void Slice(const T* input_data, const Dims<4>& input_dims,
const std::vector<int>& begin, const std::vector<int>& size,
T* output_data, const Dims<4>& output_dims) {
tflite::SliceParams op_params;
op_params.begin_count = 4;
op_params.size_count = 4;
for (int i = 0; i < 4; ++i) {
op_params.begin[i] = begin[3 - i];
op_params.size[i] = size[3 - i];
}
Slice(op_params, DimsToShape(input_dims), input_data,
DimsToShape(output_dims), output_data);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LEGACY_REFERENCE_OPS_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/legacy_reference_ops.h | C++ | apache-2.0 | 108,079 |
/* Copyright 2021 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <limits>

#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Float LogSoftmax over the trailing dimension of `input_shape`.
// For each row computes x - max(x) - log(sum(exp(x - max(x)))), which is
// mathematically log_softmax(x) but numerically stable:
//   log(exp(x)/sum(exp(x))) == log(exp(x+C)/sum(exp(x+C))) for any C.
inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape, const float* input_data,
                       const RuntimeShape& output_shape, float* output_data) {
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int i = 0; i < outer_size; ++i) {
    const float* row_in = input_data + i * depth;
    float* row_out = output_data + i * depth;
    // Row maximum, used as the stability offset C above.
    float row_max = std::numeric_limits<float>::lowest();
    for (int c = 0; c < depth; ++c) {
      row_max = std::max(row_max, row_in[c]);
    }
    // Sum of shifted exponentials.
    float exp_sum = 0.f;
    for (int c = 0; c < depth; ++c) {
      exp_sum += std::exp(row_in[c] - row_max);
    }
    const float log_sum = std::log(exp_sum);
    for (int c = 0; c < depth; ++c) {
      row_out[c] = row_in[c] - row_max - log_sum;
    }
  }
}
// Quantized uint8 LogSoftmax using gemmlowp fixed-point arithmetic.
// The sum of exponentials is accumulated in Q12 fixed point; differences from
// the row max are rescaled into Q5.26 via params.input_multiplier /
// params.input_left_shift. Elements whose (value - row max) falls below
// params.diff_min contribute negligibly to the sum and are skipped; outputs
// below the adjusted threshold are clamped to 0 (the smallest uint8 value).
inline void LogSoftmax(const SoftmaxParams& params,
                       const RuntimeShape& input_shape,
                       const uint8_t* input_data,
                       const RuntimeShape& output_shape, uint8_t* output_data) {
  const int32_t input_multiplier = params.input_multiplier;
  const int32_t input_left_shift = params.input_left_shift;
  const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor;
  const int32_t reverse_scaling_right_shift =
      params.reverse_scaling_right_shift;
  const int diff_min = params.diff_min;
  // The representation chosen for the input to the exp() function is Q5.26.
  // We need to leave extra space since values that we skip might be as large
  // as -32 before multiplying by input_beta_multiplier, and therefore as
  // large as -16 afterwards. Note that exp(-8) is definitely not
  // insignificant to accumulation, but exp(-16) definitely is.
  static constexpr int kScaledDiffIntegerBits = 5;
  static constexpr int kAccumulationIntegerBits = 12;
  static constexpr int kOutputIntegerBits = 4;
  using FixedPointScaledDiff =
      gemmlowp::FixedPoint<int32_t, kScaledDiffIntegerBits>;
  using FixedPointAccum =
      gemmlowp::FixedPoint<int32_t, kAccumulationIntegerBits>;
  const int trailing_dim = input_shape.DimensionsCount() - 1;
  const int outer_size =
      MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape);
  const int depth =
      MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim);
  for (int i = 0; i < outer_size; ++i) {
    // uint8 values are non-negative, so 0 is a safe initial maximum.
    uint8_t max_in_row = 0;
    for (int c = 0; c < depth; ++c) {
      max_in_row = std::max(max_in_row, input_data[i * depth + c]);
    }
    // Accumulate sum(exp(x - max)) in Q12 fixed point.
    FixedPointAccum sum_of_exps = FixedPointAccum::Zero();
    for (int c = 0; c < depth; ++c) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
      if (input_diff >= diff_min) {
        const int32_t input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_multiplier, input_left_shift);
        const FixedPointScaledDiff scaled_diff_f8 =
            FixedPointScaledDiff::FromRaw(input_diff_rescaled);
        sum_of_exps = sum_of_exps + gemmlowp::Rescale<kAccumulationIntegerBits>(
                                        exp_on_negative_values(scaled_diff_f8));
      }
    }
    // log(sum(exp(...))) in Q5.26 raw representation.
    const int32_t fixed_log_sum_of_exps =
        log_x_for_x_greater_than_or_equal_to_1<kScaledDiffIntegerBits>(
            sum_of_exps)
            .raw();
    // rescaled_diff_min is smallest representable in
    // Q(kScaledDiffIntegerBits).(31-kScaledDiffIntegerBits) plus the
    // log-sub-exps that will be subtracted in the loop.
    //
    // The thresholds diff_min, etc are negative.
    const int rescaled_diff_min =
        fixed_log_sum_of_exps + std::numeric_limits<int32_t>::lowest();
    const int adjusted_diff_min =
        std::max(static_cast<int32_t>(
                     diff_min - 1),  // Note use of > below instead of >= above.
                 MultiplyByQuantizedMultiplierSmallerThanOneExp(
                     rescaled_diff_min, reverse_scaling_divisor,
                     -reverse_scaling_right_shift));
    for (int c = 0; c < depth; ++c) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[i * depth + c]) - max_in_row;
      if (input_diff > adjusted_diff_min) {
        const int32_t input_diff_rescaled =
            MultiplyByQuantizedMultiplierGreaterThanOne(
                input_diff, input_multiplier, input_left_shift);
        // (x - max) - log(sum) rescaled to the output format, recentered so
        // that the maximum representable output maps to 255, then saturated
        // to the uint8 range.
        int32_t unsat_output =
            gemmlowp::RoundingDivideByPOT(
                (input_diff_rescaled - fixed_log_sum_of_exps),
                31 - kScaledDiffIntegerBits - kOutputIntegerBits) +
            255;
        output_data[i * depth + c] = static_cast<uint8_t>(
            std::max(std::min(unsat_output, static_cast<int32_t>(255)),
                     static_cast<int32_t>(0)));
      } else {
        // Set output to smallest value.
        output_data[i * depth + c] = 0;
      }
    }
  }
}
// Generic quantized LogSoftmax shared by the int8 entry point below.
// T is the quantized element type (e.g. int8_t); outputs below the adjusted
// threshold saturate to std::numeric_limits<T>::min(). Uses the same Q5.26
// input / Q12 accumulation scheme as the uint8 overload above, with the
// integer-bit constants required to match the Prepare function.
template <typename T>
inline void LogSoftmaxQuantized(const SoftmaxParams& params,
                                const size_t outer_size, const size_t depth,
                                const RuntimeShape& input_shape,
                                const T* input_data,
                                const RuntimeShape& output_shape,
                                T* output_data) {
  const int32_t input_multiplier = params.input_multiplier;
  const int32_t input_left_shift = params.input_left_shift;
  const int32_t reverse_scaling_divisor = params.reverse_scaling_divisor;
  const int32_t reverse_scaling_right_shift =
      params.reverse_scaling_right_shift;
  const int diff_min = params.diff_min;
  static constexpr T kMinT8 = std::numeric_limits<T>::min();
  static constexpr T kMaxT8 = std::numeric_limits<T>::max();
  static constexpr int32_t kMinInt32 = std::numeric_limits<int32_t>::min();
  // All IntegerBits must agree with Prepare function.
  // Input is chosen as Q5.26 so exp(-1 * 2^5 * 2^-1) = exp(-16) is negligible.
  static constexpr int kInputIntegerBits = 5;
  static constexpr int kAccumulationIntegerBits = 12;
  static constexpr int kOutputIntegerBits = 4;
  using F5 = gemmlowp::FixedPoint<int32_t, kInputIntegerBits>;
  using F12 = gemmlowp::FixedPoint<int32_t, kAccumulationIntegerBits>;
  for (size_t outer_index = 0; outer_index < outer_size; ++outer_index) {
    // Row maximum, used to shift the inputs for numerical stability.
    T max_in_row = kMinT8;
    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      max_in_row =
          std::max(max_in_row, input_data[outer_index * depth + inner_index]);
    }
    // Accumulator "sum_of_exps_in_q12" is safe from overflowing in 2^12 steps.
    F12 sum_of_exps_in_q12 = F12::FromRaw(0);
    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      // Differences below diff_min contribute negligibly and are skipped.
      if (input_diff >= diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_left_shift);
        sum_of_exps_in_q12 =
            sum_of_exps_in_q12 +
            gemmlowp::Rescale<kAccumulationIntegerBits>(
                exp_on_negative_values(F5::FromRaw(input_diff_in_q5)));
      }
    }
    const int32_t log_sum_of_exps_in_q5 =
        log_x_for_x_greater_than_or_equal_to_1<kInputIntegerBits>(
            sum_of_exps_in_q12)
            .raw();
    // Potentially reduced the valid range. shifted_log_sum_of_exps_in_q5 is
    // smallest representable in Q5.26 plus the log_sum_of_exps.
    const int32_t shifted_log_sum_of_exps_in_q5 =
        log_sum_of_exps_in_q5 + kMinInt32;
    const int32_t adjusted_diff_min =
        std::max(static_cast<int32_t>(diff_min - 1),
                 MultiplyByQuantizedMultiplier(shifted_log_sum_of_exps_in_q5,
                                               reverse_scaling_divisor,
                                               -reverse_scaling_right_shift));
    for (size_t inner_index = 0; inner_index < depth; ++inner_index) {
      int32_t input_diff =
          static_cast<int32_t>(input_data[outer_index * depth + inner_index]) -
          max_in_row;
      // Note use of > below instead of >= above.
      if (input_diff > adjusted_diff_min) {
        const int32_t input_diff_in_q5 = MultiplyByQuantizedMultiplier(
            input_diff, input_multiplier, input_left_shift);
        // Rescale and downcast.
        int32_t output_in_q27 =
            gemmlowp::RoundingDivideByPOT(
                (input_diff_in_q5 - log_sum_of_exps_in_q5),
                31 - kInputIntegerBits - kOutputIntegerBits) +
            kMaxT8;
        // Saturate to the representable range of T before the final cast.
        output_in_q27 =
            std::max(std::min(output_in_q27, static_cast<int32_t>(kMaxT8)),
                     static_cast<int32_t>(kMinT8));
        output_data[outer_index * depth + inner_index] =
            static_cast<T>(output_in_q27);
      } else {
        output_data[outer_index * depth + inner_index] = kMinT8;
      }
    }
  }
}
// int8 LogSoftmax entry point; delegates to the generic quantized
// implementation above with T = int8_t.
inline void LogSoftmax(const SoftmaxParams& params, const size_t outer_size,
                       const size_t depth, const RuntimeShape& input_shape,
                       const int8_t* input_data,
                       const RuntimeShape& output_shape, int8_t* output_data) {
  LogSoftmaxQuantized(params, outer_size, depth, input_shape, input_data,
                      output_shape, output_data);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOG_SOFTMAX_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/log_softmax.h | C++ | apache-2.0 | 10,876 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
#include <cmath>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
#include "tensorflow/lite/kernels/op_macros.h"
namespace tflite {
namespace reference_ops {
// Float logistic (sigmoid): output[i] = 1 / (1 + exp(-input[i])).
//
// A piecewise approximation is used instead of the plain formula:
// 0. It gives enough precision for float.
// 1. It works around an embedded chipset where exp() does not return inf on
//    overflow as IEEE 754 requires (it returned 1.701417 instead).
// 2. It is faster and matches the optimized kernels (see the definition of
//    scalar_logistic_op<float>).
inline void Logistic(const RuntimeShape& input_shape, const float* input_data,
                     const RuntimeShape& output_shape, float* output_data) {
  // Above kUpperCutoff the sigmoid saturates to 1; below kLowerCutoff it is
  // well approximated by exp(x).
  static constexpr float kUpperCutoff = 16.619047164916992188f;
  static constexpr float kLowerCutoff = -9.f;
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < size; i++) {
    const float x = input_data[i];
    float y;
    if (x > kUpperCutoff) {
      y = 1.0f;
    } else if (x < kLowerCutoff) {
      y = std::exp(x);
    } else {
      y = 1.f / (1.f + std::exp(-x));
    }
    output_data[i] = y;
  }
}
// Convenience version that allows, for example, generated-code calls to be
// uniform between data types.
inline void Logistic(const LogisticParams&, const RuntimeShape& input_shape,
                     const float* input_data, const RuntimeShape& output_shape,
                     float* output_data) {
  // The LogisticParams argument exists only for signature uniformity with the
  // quantized overloads; the float path does not use it.
  Logistic(input_shape, input_data, output_shape, output_data);
}
inline void Logistic(const LogisticParams& params,
const RuntimeShape& input_shape, const int16_t* input_data,
const RuntimeShape& output_shape, int16_t* output_data) {
const int flat_size = MatchingFlatSize(input_shape, output_shape);
for (int i = 0; i < flat_size; i++) {
// F0 uses 0 integer bits, range [-1, 1].
// This is the return type of math functions such as tanh, logistic,
// whose range is in [-1, 1].
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
// F3 uses 3 integer bits, range [-8, 8], the input range expected here.
using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
const F3 input = F3::FromRaw(input_data[i]);
F0 output = gemmlowp::logistic(input);
output_data[i] = output.raw();
}
}
// Quantized int8_t logistic activation. Cheats by dequantizing and
// requantizing around the floating point logistic method. This implementation
// is slow on platforms without a floating point unit.
// TODO(b/141211002): Delete this int8_t implementation once we can reuse the
// approach used in TFLite for int8_t Logistic.
// int8 logistic implemented by dequantizing to float, applying the same
// piecewise sigmoid approximation as the float kernel, and requantizing.
// Slow on platforms without an FPU.
// TODO(b/141211002): Delete this int8_t implementation once we can reuse the
// approach used in TFLite for int8_t Logistic.
inline void Logistic(const RuntimeShape& input_shape, const int8_t* input_data,
                     float input_scale, int input_zero_point,
                     const RuntimeShape& output_shape, int8_t* output_data,
                     float output_scale, int output_zero_point) {
  // Same cutoffs as the float kernel: saturate to 1 above, use exp(x) below.
  // (The approximation also sidesteps an embedded-chipset exp() overflow bug
  // and matches the optimized scalar_logistic_op<float> behavior.)
  static constexpr float kUpperCutoff = 16.619047164916992188f;
  static constexpr float kLowerCutoff = -9.f;
  const int size = MatchingFlatSize(input_shape, output_shape);
  for (int i = 0; i < size; i++) {
    // Dequantize.
    const float x =
        static_cast<float>((input_data[i] - input_zero_point) * input_scale);
    float y;
    if (x > kUpperCutoff) {
      y = 1.0f;
    } else if (x < kLowerCutoff) {
      y = std::exp(x);
    } else {
      y = 1.f / (1.f + std::exp(-x));
    }
    // Requantize.
    output_data[i] = static_cast<int8_t>(y / output_scale + output_zero_point);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_LOGISTIC_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/logistic.h | C++ | apache-2.0 | 5,355 |
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Applies the binary functor `op` (e.g. max or min) element-wise over two
// inputs, broadcasting them to the output shape when their shapes differ.
// N is the maximum supported rank (shapes are extended to N dimensions for
// the broadcast path). Falls back to a flat loop when no broadcast is needed.
template <typename T, typename Op, int N = 5>
void MaximumMinimumBroadcastSlow(const RuntimeShape& unextended_input1_shape,
                                 const T* input1_data,
                                 const RuntimeShape& unextended_input2_shape,
                                 const T* input2_data,
                                 const RuntimeShape& unextended_output_shape,
                                 T* output_data, Op op) {
  // Uses element-wise calculation if broadcast is not required.
  if (unextended_input1_shape == unextended_input2_shape) {
    const int flat_size =
        MatchingElementsSize(unextended_input1_shape, unextended_input2_shape,
                             unextended_output_shape);
    for (int i = 0; i < flat_size; ++i) {
      output_data[i] = op(input1_data[i], input2_data[i]);
    }
  } else {
    TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), N);
    TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), N);
    TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), N);
    // Descriptors hold strides that map output indices back to each
    // (possibly broadcast) input element.
    NdArrayDesc<N> desc1;
    NdArrayDesc<N> desc2;
    NdArrayDesc<N> output_desc;
    NdArrayDescsForElementwiseBroadcast(
        unextended_input1_shape, unextended_input2_shape, &desc1, &desc2);
    CopyDimsToDesc(RuntimeShape::ExtendedShape(N, unextended_output_shape),
                   &output_desc);
    // Invoked once per output coordinate by NDOpsHelper.
    auto maxmin_func = [&](int indexes[N]) {
      output_data[SubscriptToIndex(output_desc, indexes)] =
          op(input1_data[SubscriptToIndex(desc1, indexes)],
             input2_data[SubscriptToIndex(desc2, indexes)]);
    };
    NDOpsHelper<N>(output_desc, maxmin_func);
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MAXIMUM_MINIMUM_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/maximum_minimum.h | C++ | apache-2.0 | 2,742 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
#include "tensorflow/lite/kernels/internal/common.h"
namespace tflite {
namespace reference_ops {
// Element-wise mul that can often be used for inner loop of broadcast Mul as
// well as the non-broadcast Mul.
// Element-wise mul that can often be used for inner loop of broadcast Mul as
// well as the non-broadcast Mul. Inputs are recentered by their zero-point
// offsets, multiplied, rescaled by the output quantized multiplier, and
// clamped to the activation range.
inline void MulElementwise(int size, const ArithmeticParams& params,
                           const uint8_t* input1_data,
                           const uint8_t* input2_data, uint8_t* output_data) {
  for (int i = 0; i < size; ++i) {
    const int32_t a = params.input1_offset + input1_data[i];
    const int32_t b = params.input2_offset + input2_data[i];
    const int32_t raw = params.output_offset +
                        MultiplyByQuantizedMultiplier(a * b,
                                                      params.output_multiplier,
                                                      params.output_shift);
    const int32_t clamped =
        std::max(params.quantized_activation_min,
                 std::min(params.quantized_activation_max, raw));
    output_data[i] = static_cast<uint8_t>(clamped);
  }
}
// Generic element-wise multiply for non-quantized types (no broadcasting).
// Each product is clamped to the activation range taken from `params`.
template <typename T>
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const T* input1_data,
                const RuntimeShape& input2_shape, const T* input2_data,
                const RuntimeShape& output_shape, T* output_data) {
  T act_min;
  T act_max;
  GetActivationParams(params, &act_min, &act_max);
  const int size = MatchingFlatSize(input1_shape, input2_shape, output_shape);
  for (int i = 0; i < size; ++i) {
    const T product = input1_data[i] * input2_data[i];
    output_data[i] = ActivationFunctionWithMinMax(product, act_min, act_max);
  }
}
// Quantized uint8 element-wise multiply (no broadcasting). All three shapes
// must have the same flat size; the per-element work is delegated to
// MulElementwise above.
inline void Mul(const ArithmeticParams& params,
                const RuntimeShape& input1_shape, const uint8_t* input1_data,
                const RuntimeShape& input2_shape, const uint8_t* input2_data,
                const RuntimeShape& output_shape, uint8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  const int flat_size =
      MatchingFlatSize(input1_shape, input2_shape, output_shape);
  MulElementwise(flat_size, params, input1_data, input2_data, output_data);
}
// Quantized uint8 multiply with 4D broadcasting. Shapes are extended to rank
// 4; the NdArrayDesc strides map each output coordinate back to the
// (possibly broadcast) input elements. Same quantized arithmetic as
// MulElementwise: recenter by offsets, multiply, rescale, clamp.
inline void BroadcastMul4DSlow(const ArithmeticParams& params,
                               const RuntimeShape& input1_shape,
                               const uint8_t* input1_data,
                               const RuntimeShape& input2_shape,
                               const uint8_t* input2_data,
                               const RuntimeShape& output_shape,
                               uint8_t* output_data) {
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(input1_shape, input2_shape, &desc1,
                                      &desc2);
  const RuntimeShape extended_output_shape =
      RuntimeShape::ExtendedShape(4, output_shape);
  // Loop nest order (batch, height, width, channel) keeps the innermost loop
  // on the smallest-stride dimension.
  for (int b = 0; b < extended_output_shape.Dims(0); ++b) {
    for (int y = 0; y < extended_output_shape.Dims(1); ++y) {
      for (int x = 0; x < extended_output_shape.Dims(2); ++x) {
        for (int c = 0; c < extended_output_shape.Dims(3); ++c) {
          const int32_t input1_val =
              params.input1_offset +
              input1_data[SubscriptToIndex(desc1, b, y, x, c)];
          const int32_t input2_val =
              params.input2_offset +
              input2_data[SubscriptToIndex(desc2, b, y, x, c)];
          const int32_t unclamped_result =
              params.output_offset +
              MultiplyByQuantizedMultiplier(input1_val * input2_val,
                                            params.output_multiplier,
                                            params.output_shift);
          const int32_t clamped_output = std::min(
              params.quantized_activation_max,
              std::max(params.quantized_activation_min, unclamped_result));
          output_data[Offset(extended_output_shape, b, y, x, c)] =
              static_cast<uint8_t>(clamped_output);
        }
      }
    }
  }
}
// Generic (non-quantized) multiply with 4D broadcasting. Products are
// clamped to the activation range from `params`. Inputs may be any rank up
// to 4; shapes are extended to rank 4 before broadcasting.
template <typename T>
void BroadcastMul4DSlow(const ArithmeticParams& params,
                        const RuntimeShape& unextended_input1_shape,
                        const T* input1_data,
                        const RuntimeShape& unextended_input2_shape,
                        const T* input2_data,
                        const RuntimeShape& unextended_output_shape,
                        T* output_data) {
  T output_activation_min;
  T output_activation_max;
  GetActivationParams(params, &output_activation_min, &output_activation_max);
  TFLITE_DCHECK_LE(unextended_input1_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_input2_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_LE(unextended_output_shape.DimensionsCount(), 4);
  const RuntimeShape output_shape =
      RuntimeShape::ExtendedShape(4, unextended_output_shape);
  NdArrayDesc<4> desc1;
  NdArrayDesc<4> desc2;
  NdArrayDescsForElementwiseBroadcast(unextended_input1_shape,
                                      unextended_input2_shape, &desc1, &desc2);
  // In Tensorflow, the dimensions are canonically named (batch_number, row,
  // col, channel), with extents (batches, height, width, depth), with the
  // trailing dimension changing most rapidly (channels has the smallest stride,
  // typically 1 element).
  //
  // In generated C code, we store arrays with the dimensions reversed. The
  // first dimension has smallest stride.
  //
  // We name our variables by their Tensorflow convention, but generate C code
  // nesting loops such that the innermost loop has the smallest stride for the
  // best cache behavior.
  for (int b = 0; b < output_shape.Dims(0); ++b) {
    for (int y = 0; y < output_shape.Dims(1); ++y) {
      for (int x = 0; x < output_shape.Dims(2); ++x) {
        for (int c = 0; c < output_shape.Dims(3); ++c) {
          output_data[Offset(output_shape, b, y, x, c)] =
              ActivationFunctionWithMinMax(
                  input1_data[SubscriptToIndex(desc1, b, y, x, c)] *
                      input2_data[SubscriptToIndex(desc2, b, y, x, c)],
                  output_activation_min, output_activation_max);
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_MUL_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/mul.h | C++ | apache-2.0 | 7,236 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// Element-wise arithmetic negation: output[i] = -input[i].
// Input and output shapes must have the same flat size.
template <typename T>
inline void Negate(const RuntimeShape& input_shape, const T* input_data,
                   const RuntimeShape& output_shape, T* output_data) {
  const int num_elements = MatchingFlatSize(input_shape, output_shape);
  for (int idx = 0; idx < num_elements; ++idx) {
    output_data[idx] = -input_data[idx];
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NEG_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/neg.h | C++ | apache-2.0 | 1,321 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NON_MAX_SUPPRESSION_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NON_MAX_SUPPRESSION_H_
#include <algorithm>
#include <cmath>
#include <deque>
#include <queue>
namespace tflite {
namespace reference_ops {
// A pair of diagonal corners of the box.
struct BoxCornerEncoding {
  float y1;
  float x1;
  float y2;
  float x2;
};

// Returns the intersection-over-union of boxes `i` and `j` in `boxes`, which
// stores corner encodings packed as four floats per box: {y1, x1, y2, x2}.
// Corners may be given in either order; each axis is normalized to
// (min, max). Boxes with zero (or negative) area yield an IoU of 0.
inline float ComputeIntersectionOverUnion(const float* boxes, const int i,
                                          const int j) {
  // Boxes are 4 floats apart; offsets 0..3 are y1, x1, y2, x2.
  const float* box_i = boxes + 4 * i;
  const float* box_j = boxes + 4 * j;
  const float i_y_min = std::min<float>(box_i[0], box_i[2]);
  const float i_y_max = std::max<float>(box_i[0], box_i[2]);
  const float i_x_min = std::min<float>(box_i[1], box_i[3]);
  const float i_x_max = std::max<float>(box_i[1], box_i[3]);
  const float j_y_min = std::min<float>(box_j[0], box_j[2]);
  const float j_y_max = std::max<float>(box_j[0], box_j[2]);
  const float j_x_min = std::min<float>(box_j[1], box_j[3]);
  const float j_x_max = std::max<float>(box_j[1], box_j[3]);
  const float area_i = (i_y_max - i_y_min) * (i_x_max - i_x_min);
  const float area_j = (j_y_max - j_y_min) * (j_x_max - j_x_min);
  // Degenerate boxes cannot overlap anything.
  if (area_i <= 0 || area_j <= 0) return 0.0;
  const float overlap_y_max = std::min<float>(i_y_max, j_y_max);
  const float overlap_x_max = std::min<float>(i_x_max, j_x_max);
  const float overlap_y_min = std::max<float>(i_y_min, j_y_min);
  const float overlap_x_min = std::max<float>(i_x_min, j_x_min);
  const float overlap_area =
      std::max<float>(overlap_y_max - overlap_y_min, 0.0) *
      std::max<float>(overlap_x_max - overlap_x_min, 0.0);
  return overlap_area / (area_i + area_j - overlap_area);
}
// Implements (Single-Class) Soft NMS (with Gaussian weighting).
// Supports functionality of TensorFlow ops NonMaxSuppressionV4 & V5.
// Reference: "Soft-NMS - Improving Object Detection With One Line of Code"
// [Bodla et al, https://arxiv.org/abs/1704.04503]
// Implementation adapted from the TensorFlow NMS code at
// tensorflow/core/kernels/non_max_suppression_op.cc.
//
// Arguments:
// boxes: box encodings in format [y1, x1, y2, x2], shape: [num_boxes, 4]
// num_boxes: number of candidates
// scores: scores for candidate boxes, in the same order. shape: [num_boxes]
// max_output_size: the maximum number of selections.
// iou_threshold: Intersection-over-Union (IoU) threshold for NMS
// score_threshold: All candidate scores below this value are rejected
// soft_nms_sigma: Soft NMS parameter, used for decaying scores
//
// Outputs:
// selected_indices: all the selected indices. Underlying array must have
// length >= max_output_size. Cannot be null.
// selected_scores: scores of selected indices. Defer from original value for
// Soft NMS. If not null, array must have length >= max_output_size.
// num_selected_indices: Number of selections. Only these many elements are
// set in selected_indices, selected_scores. Cannot be null.
//
// Assumes inputs are valid (for eg, iou_threshold must be >= 0).
inline void NonMaxSuppression(const float* boxes, const int num_boxes,
                              const float* scores, const int max_output_size,
                              const float iou_threshold,
                              const float score_threshold,
                              const float soft_nms_sigma, int* selected_indices,
                              float* selected_scores,
                              int* num_selected_indices) {
  struct Candidate {
    int index;                  // Index into boxes/scores.
    float score;                // Current (possibly soft-decayed) score.
    int suppress_begin_index;   // Count of selected boxes already compared.
  };
  // Priority queue to hold candidates.
  auto cmp = [](const Candidate bs_i, const Candidate bs_j) {
    return bs_i.score < bs_j.score;
  };
  std::priority_queue<Candidate, std::deque<Candidate>, decltype(cmp)>
      candidate_priority_queue(cmp);
  // Populate queue with candidates above the score threshold.
  for (int i = 0; i < num_boxes; ++i) {
    if (scores[i] > score_threshold) {
      candidate_priority_queue.emplace(Candidate({i, scores[i], 0}));
    }
  }
  *num_selected_indices = 0;
  int num_outputs = std::min(static_cast<int>(candidate_priority_queue.size()),
                             max_output_size);
  if (num_outputs == 0) return;
  // NMS loop.
  // Gaussian decay coefficient used as exp(scale * iou^2); zero sigma
  // disables soft suppression (scale stays 0 and is never applied).
  float scale = 0;
  if (soft_nms_sigma > 0.0) {
    scale = -0.5 / soft_nms_sigma;
  }
  while (*num_selected_indices < num_outputs &&
         !candidate_priority_queue.empty()) {
    Candidate next_candidate = candidate_priority_queue.top();
    const float original_score = next_candidate.score;
    candidate_priority_queue.pop();
    // Overlapping boxes are likely to have similar scores, therefore we
    // iterate through the previously selected boxes backwards in order to
    // see if `next_candidate` should be suppressed. We also enforce a property
    // that a candidate can be suppressed by another candidate no more than
    // once via `suppress_begin_index` which tracks which previously selected
    // boxes have already been compared against next_candidate prior to a given
    // iteration. These previous selected boxes are then skipped over in the
    // following loop.
    bool should_hard_suppress = false;
    for (int j = *num_selected_indices - 1;
         j >= next_candidate.suppress_begin_index; --j) {
      const float iou = ComputeIntersectionOverUnion(
          boxes, next_candidate.index, selected_indices[j]);
      // First decide whether to perform hard suppression.
      if (iou >= iou_threshold) {
        should_hard_suppress = true;
        break;
      }
      // Suppress score if NMS sigma > 0.
      if (soft_nms_sigma > 0.0) {
        next_candidate.score =
            next_candidate.score * std::exp(scale * iou * iou);
      }
      // If score has fallen below score_threshold, it won't be pushed back into
      // the queue.
      if (next_candidate.score <= score_threshold) break;
    }
    // If `next_candidate.score` has not dropped below `score_threshold`
    // by this point, then we know that we went through all of the previous
    // selections and can safely update `suppress_begin_index` to
    // `selected.size()`. If on the other hand `next_candidate.score`
    // *has* dropped below the score threshold, then since `suppress_weight`
    // always returns values in [0, 1], further suppression by items that were
    // not covered in the above for loop would not have caused the algorithm
    // to select this item. We thus do the same update to
    // `suppress_begin_index`, but really, this element will not be added back
    // into the priority queue.
    next_candidate.suppress_begin_index = *num_selected_indices;
    if (!should_hard_suppress) {
      if (next_candidate.score == original_score) {
        // Suppression has not occurred, so select next_candidate.
        selected_indices[*num_selected_indices] = next_candidate.index;
        if (selected_scores) {
          selected_scores[*num_selected_indices] = next_candidate.score;
        }
        ++*num_selected_indices;
      }
      if (next_candidate.score > score_threshold) {
        // Soft suppression might have occurred and current score is still
        // greater than score_threshold; add next_candidate back onto priority
        // queue.
        candidate_priority_queue.push(next_candidate);
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_NON_MAX_SUPPRESSION_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/non_max_suppression.h | C++ | apache-2.0 | 8,416 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
#include <vector>
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
// TFLite Pad supports activation tensors with up to 5 dimensions
// (batch, plane, height, width, depth — see PadImpl below).
constexpr int PadKernelMaxDimensionCount() { return 5; }
// There are two versions of pad: Pad and PadV2. In PadV2 there is a second
// scalar input that provides the padding value. Therefore pad_value_ptr can
// be equivalent to a simple input1_data. For Pad, it should point to a zero
// value.
//
// Note that two typenames are required, so that T=P=int32_t is considered a
// specialization distinct from P=int32_t.
template <typename T, typename P>
inline void PadImpl(const tflite::PadParams& op_params,
                    const RuntimeShape& input_shape, const T* input_data,
                    const P* pad_value_ptr, const RuntimeShape& output_shape,
                    T* output_data) {
  // Canonicalize both shapes to the fixed 5-D kernel layout.
  const RuntimeShape ext_input_shape =
      RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), input_shape);
  const RuntimeShape ext_output_shape =
      RuntimeShape::ExtendedShape(PadKernelMaxDimensionCount(), output_shape);
  TFLITE_DCHECK_LE(op_params.left_padding_count, PadKernelMaxDimensionCount());
  TFLITE_DCHECK_LE(op_params.right_padding_count, PadKernelMaxDimensionCount());
  // Right-align the caller-supplied paddings into fixed 5-entry arrays so the
  // loop below can unconditionally read all five entries ("padding the
  // padding"): missing leading dimensions get zero padding.
  int pad_before[PadKernelMaxDimensionCount()];
  int pad_after[PadKernelMaxDimensionCount()];
  for (int& p : pad_before) p = 0;
  for (int& p : pad_after) p = 0;
  const int before_shift =
      PadKernelMaxDimensionCount() - op_params.left_padding_count;
  for (int i = 0; i < op_params.left_padding_count; ++i) {
    pad_before[i + before_shift] = op_params.left_padding[i];
  }
  const int after_shift =
      PadKernelMaxDimensionCount() - op_params.right_padding_count;
  for (int i = 0; i < op_params.right_padding_count; ++i) {
    pad_after[i + after_shift] = op_params.right_padding[i];
  }
  const int batches = ext_output_shape.Dims(0);
  const int planes = ext_output_shape.Dims(1);
  const int height = ext_output_shape.Dims(2);
  const int width = ext_output_shape.Dims(3);
  const int depth = ext_output_shape.Dims(4);
  const T pad_value = *pad_value_ptr;
  // Walk the output in row-major order: a coordinate inside any pad band
  // emits pad_value, otherwise the next input element is consumed, so both
  // pointers advance strictly sequentially.
  const T* src = input_data;
  T* dst = output_data;
  for (int b = 0; b < batches; ++b) {
    for (int p = 0; p < planes; ++p) {
      for (int h = 0; h < height; ++h) {
        for (int w = 0; w < width; ++w) {
          for (int d = 0; d < depth; ++d) {
            const bool in_pad_region =
                b < pad_before[0] || b >= batches - pad_after[0] ||
                p < pad_before[1] || p >= planes - pad_after[1] ||
                h < pad_before[2] || h >= height - pad_after[2] ||
                w < pad_before[3] || w >= width - pad_after[3] ||
                d < pad_before[4] || d >= depth - pad_after[4];
            *dst++ = in_pad_region ? pad_value : *src++;
          }
        }
      }
    }
  }
}
// Pad: generic entry point; delegates straight to PadImpl. Two typenames are
// kept (see PadImpl) so that T == P == int32_t remains a distinct
// specialization.
template <typename T, typename P>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const P* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}
// The second (pad-value) input can be int32_t when, say, the first is uint8_t.
// The int32 pad value is narrowed to T before delegating so that PadImpl
// reads a pad value of the tensor's own element type.
template <typename T>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const T* input_data,
                const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
                T* output_data) {
  const T converted_pad_value = static_cast<T>(*pad_value_ptr);
  PadImpl(op_params, input_shape, input_data, &converted_pad_value,
          output_shape, output_data);
}
// This version avoids conflicting template matching.
// Full specialization for the all-int32_t call, which otherwise could be
// matched by both the generic two-parameter template and the int32 pad-value
// overload above.
template <>
inline void Pad(const tflite::PadParams& op_params,
                const RuntimeShape& input_shape, const int32_t* input_data,
                const int32_t* pad_value_ptr, const RuntimeShape& output_shape,
                int32_t* output_data) {
  PadImpl(op_params, input_shape, input_data, pad_value_ptr, output_shape,
          output_data);
}
// PadImageStyle: the reference implementation has no image-specific behavior;
// it simply forwards to Pad.
template <typename T, typename P>
inline void PadImageStyle(const tflite::PadParams& op_params,
                          const RuntimeShape& input_shape, const T* input_data,
                          const P* pad_value_ptr,
                          const RuntimeShape& output_shape, T* output_data) {
  Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
      output_data);
}
// Float overload of PadImageStyle; also a plain forward to Pad.
template <typename P>
inline void PadImageStyle(const tflite::PadParams& op_params,
                          const RuntimeShape& input_shape,
                          const float* input_data, const P* pad_value_ptr,
                          const RuntimeShape& output_shape,
                          float* output_data) {
  Pad(op_params, input_shape, input_data, pad_value_ptr, output_shape,
      output_data);
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_PAD_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/pad.h | C++ | apache-2.0 | 6,998 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/quantization_util.h"
#include "tensorflow/lite/kernels/internal/types.h"
namespace tflite {
namespace reference_ops {
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const float* input_data,
const RuntimeShape& output_shape, float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
float total = 0.f;
float filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int in_x = in_x_origin + filter_x;
const int in_y = in_y_origin + filter_y;
total +=
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
filter_count++;
}
}
const float average = total / filter_count;
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
ActivationFunctionWithMinMax(average, params.float_activation_min,
params.float_activation_max);
}
}
}
}
}
inline void AveragePool(const PoolParams& params,
const RuntimeShape& input_shape,
const uint8_t* input_data,
const RuntimeShape& output_shape,
uint8_t* output_data) {
TFLITE_DCHECK_LE(params.quantized_activation_min,
params.quantized_activation_max);
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
int32_t acc = 0;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int in_x = in_x_origin + filter_x;
const int in_y = in_y_origin + filter_y;
acc +=
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
filter_count++;
}
}
acc = (acc + filter_count / 2) / filter_count;
acc = std::max(acc, params.quantized_activation_min);
acc = std::min(acc, params.quantized_activation_max);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
static_cast<uint8_t>(acc);
}
}
}
}
}
inline void L2Pool(const PoolParams& params, const RuntimeShape& input_shape,
const float* input_data, const RuntimeShape& output_shape,
float* output_data) {
TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
const int batches = MatchingDim(input_shape, 0, output_shape, 0);
const int depth = MatchingDim(input_shape, 3, output_shape, 3);
const int input_height = input_shape.Dims(1);
const int input_width = input_shape.Dims(2);
const int output_height = output_shape.Dims(1);
const int output_width = output_shape.Dims(2);
const int stride_height = params.stride_height;
const int stride_width = params.stride_width;
for (int batch = 0; batch < batches; ++batch) {
for (int out_y = 0; out_y < output_height; ++out_y) {
for (int out_x = 0; out_x < output_width; ++out_x) {
for (int channel = 0; channel < depth; ++channel) {
const int in_x_origin =
(out_x * stride_width) - params.padding_values.width;
const int in_y_origin =
(out_y * stride_height) - params.padding_values.height;
// Compute the boundaries of the filter region clamped so as to
// ensure that the filter window fits in the input array.
const int filter_x_start = std::max(0, -in_x_origin);
const int filter_x_end =
std::min(params.filter_width, input_width - in_x_origin);
const int filter_y_start = std::max(0, -in_y_origin);
const int filter_y_end =
std::min(params.filter_height, input_height - in_y_origin);
float sum_squares = 0.f;
int filter_count = 0;
for (int filter_y = filter_y_start; filter_y < filter_y_end;
++filter_y) {
for (int filter_x = filter_x_start; filter_x < filter_x_end;
++filter_x) {
const int in_x = in_x_origin + filter_x;
const int in_y = in_y_origin + filter_y;
const float val =
input_data[Offset(input_shape, batch, in_y, in_x, channel)];
sum_squares += val * val;
filter_count++;
}
}
const float l2pool_result = std::sqrt(sum_squares / filter_count);
output_data[Offset(output_shape, batch, out_y, out_x, channel)] =
ActivationFunctionWithMinMax(l2pool_result,
params.float_activation_min,
params.float_activation_max);
}
}
}
}
}
// Float max pooling (NHWC, 4-D shapes only): each output element is the
// maximum input value inside the (edge-clamped) filter window, then the
// float activation clamp is applied.
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const float* input_data, const RuntimeShape& output_shape,
                    float* output_data) {
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int in_height = input_shape.Dims(1);
  const int in_width = input_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  const int out_width = output_shape.Dims(2);
  for (int b = 0; b < batches; ++b) {
    for (int oy = 0; oy < out_height; ++oy) {
      for (int ox = 0; ox < out_width; ++ox) {
        for (int c = 0; c < depth; ++c) {
          // Top-left corner of the filter in input coordinates (may be
          // negative when padding is in effect).
          const int ix0 =
              ox * params.stride_width - params.padding_values.width;
          const int iy0 =
              oy * params.stride_height - params.padding_values.height;
          // Clamp the filter window so it stays inside the input.
          const int fx_begin = std::max(0, -ix0);
          const int fx_end = std::min(params.filter_width, in_width - ix0);
          const int fy_begin = std::max(0, -iy0);
          const int fy_end = std::min(params.filter_height, in_height - iy0);
          // Identity element for max over floats.
          float best = std::numeric_limits<float>::lowest();
          for (int fy = fy_begin; fy < fy_end; ++fy) {
            for (int fx = fx_begin; fx < fx_end; ++fx) {
              best = std::max(
                  best,
                  input_data[Offset(input_shape, b, iy0 + fy, ix0 + fx, c)]);
            }
          }
          output_data[Offset(output_shape, b, oy, ox, c)] =
              ActivationFunctionWithMinMax(best, params.float_activation_min,
                                           params.float_activation_max);
        }
      }
    }
  }
}
// Quantized (uint8) max pooling (NHWC, 4-D shapes only): takes the maximum
// over the (edge-clamped) filter window and clamps to the quantized
// activation range, which itself must lie inside [0, 255].
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
                    const uint8_t* input_data, const RuntimeShape& output_shape,
                    uint8_t* output_data) {
  TFLITE_DCHECK_LE(params.quantized_activation_min,
                   params.quantized_activation_max);
  TFLITE_DCHECK_GE(params.quantized_activation_min, 0);
  TFLITE_DCHECK_LE(params.quantized_activation_max, 255);
  TFLITE_DCHECK_EQ(input_shape.DimensionsCount(), 4);
  TFLITE_DCHECK_EQ(output_shape.DimensionsCount(), 4);
  const int batches = MatchingDim(input_shape, 0, output_shape, 0);
  const int depth = MatchingDim(input_shape, 3, output_shape, 3);
  const int in_height = input_shape.Dims(1);
  const int in_width = input_shape.Dims(2);
  const int out_height = output_shape.Dims(1);
  const int out_width = output_shape.Dims(2);
  for (int b = 0; b < batches; ++b) {
    for (int oy = 0; oy < out_height; ++oy) {
      for (int ox = 0; ox < out_width; ++ox) {
        for (int c = 0; c < depth; ++c) {
          // Top-left corner of the filter in input coordinates.
          const int ix0 =
              ox * params.stride_width - params.padding_values.width;
          const int iy0 =
              oy * params.stride_height - params.padding_values.height;
          // Clamp the filter window so it stays inside the input.
          const int fx_begin = std::max(0, -ix0);
          const int fx_end = std::min(params.filter_width, in_width - ix0);
          const int fy_begin = std::max(0, -iy0);
          const int fy_end = std::min(params.filter_height, in_height - iy0);
          // Identity element for max over uint8.
          uint8_t best = 0;
          for (int fy = fy_begin; fy < fy_end; ++fy) {
            for (int fx = fx_begin; fx < fx_end; ++fx) {
              best = std::max(
                  best,
                  input_data[Offset(input_shape, b, iy0 + fy, ix0 + fx, c)]);
            }
          }
          best = std::max<uint8_t>(best, params.quantized_activation_min);
          best = std::min<uint8_t>(best, params.quantized_activation_max);
          output_data[Offset(output_shape, b, oy, ox, c)] = best;
        }
      }
    }
  }
}
} // namespace reference_ops
} // namespace tflite
#endif // TENSORFLOW_LITE_KERNELS_INTERNAL_REFERENCE_POOLING_H_
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/pooling.h | C++ | apache-2.0 | 14,148 |
/* Copyright 2019 The TensorFlow Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>
#include <utility>
#include "fixedpoint/fixedpoint.h"
#include "tensorflow/lite/kernels/internal/common.h"
#include "tensorflow/lite/kernels/internal/compatibility.h"
#include "tensorflow/lite/kernels/internal/cppmath.h"
#include "tensorflow/lite/kernels/internal/reference/portable_tensor_utils_impl.h"
#if defined(_MSC_VER)
#define __restrict__ __restrict
#endif
namespace tflite {
namespace tensor_utils {
namespace {
// int16 saturation bounds, used by the quantized kernels below when clamping
// 32-bit intermediates back into int16 range.
const int32_t kInt16Max = std::numeric_limits<int16_t>::max();
const int32_t kInt16Min = std::numeric_limits<int16_t>::min();
}  // namespace
// Symmetric int8 quantization where the float range is derived from the data
// itself: scans `values` for its extrema, reports them through `min_value` /
// `max_value`, and delegates the actual quantization to the range-taking
// overload below.
void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float* min_value,
                                     float* max_value, float* scaling_factor) {
  auto minmax = std::minmax_element(values, values + size);
  *min_value = *minmax.first;
  *max_value = *minmax.second;
  PortableSymmetricQuantizeFloats(values, size, quantized_values, *min_value,
                                  *max_value, scaling_factor);
}
// Symmetric int8 quantization over a caller-supplied float range.
// The scale maps max(|min_value|, |max_value|) onto 127, so 0.f always
// quantizes exactly to 0. A degenerate all-zero range yields zeros with
// scale 1.
void PortableSymmetricQuantizeFloats(const float* values, const int size,
                                     int8_t* quantized_values, float min_value,
                                     float max_value, float* scaling_factor) {
  const int32_t kMaxQuantized = 127;
  const float abs_range = std::max(std::abs(min_value), std::abs(max_value));
  if (abs_range == 0) {
    // Nothing to scale: emit zeros and a benign unit scale.
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    return;
  }
  *scaling_factor = abs_range / kMaxQuantized;
  const float inverse_scale = kMaxQuantized / abs_range;
  for (int i = 0; i < size; ++i) {
    int32_t q = static_cast<int32_t>(TfLiteRound(values[i] * inverse_scale));
    // Clamp to [-127, 127]: just in case some odd numeric offset.
    q = std::min(kMaxQuantized, std::max(-kMaxQuantized, q));
    quantized_values[i] = static_cast<int8_t>(q);
  }
}
// Asymmetric int8 quantization: picks a scale and zero point ("offset") so
// that the observed range of `values` — always extended to include 0 — maps
// onto [-128, 127], then quantizes each element with round-to-nearest and
// clamping.
//
// Outputs:
//   quantized_values: `size` int8 results.
//   scaling_factor:   float step per quantized unit.
//   offset:           zero point, i.e. the quantized value representing 0.f.
void PortableAsymmetricQuantizeFloats(const float* values, const int size,
                                      int8_t* quantized_values,
                                      float* scaling_factor, int32_t* offset) {
  const int32_t kMinScale = -128;
  const int32_t kMaxScale = 127;
  const double qmin_double = kMinScale;
  const double qmax_double = kMaxScale;
  const auto minmax = std::minmax_element(values, values + size);
  // Extend the real range to include 0 so the zero point is representable.
  const double rmin = std::fmin(0, *minmax.first);
  const double rmax = std::fmax(0, *minmax.second);
  if (rmin == rmax) {
    // All-zero input: any scale works; report scale 1 and offset 0.
    memset(quantized_values, 0, size * sizeof(int8_t));
    *scaling_factor = 1;
    *offset = 0;
    return;
  } else {
    double scale = (rmax - rmin) / (qmax_double - qmin_double);
    // Candidate zero points derived from each end of the range; the one with
    // the smaller rounding error wins, then it is "nudged" (clamped and
    // rounded) into the valid quantized range.
    const double zero_point_from_min = qmin_double - rmin / scale;
    const double zero_point_from_max = qmax_double - rmax / scale;
    const double zero_point_from_min_error =
        std::abs(qmin_double) + std::abs(rmin / scale);
    const double zero_point_from_max_error =
        std::abs(qmax_double) + std::abs(rmax / scale);
    const double zero_point_double =
        zero_point_from_min_error < zero_point_from_max_error
            ? zero_point_from_min
            : zero_point_from_max;
    int8_t nudged_zero_point = 0;
    if (zero_point_double <= qmin_double) {
      nudged_zero_point = kMinScale;
    } else if (zero_point_double >= qmax_double) {
      nudged_zero_point = kMaxScale;
    } else {
      nudged_zero_point = static_cast<int8_t>(round(zero_point_double));
    }
    *scaling_factor = scale;
    *offset = nudged_zero_point;
  }
  // Quantize: q = round(offset + value / scale), clamped to [-128, 127].
  const float scaling_factor_inv = 1.0 / *scaling_factor;
  for (int i = 0; i < size; ++i) {
    const int32_t quantized_value = static_cast<int32_t>(
        TfLiteRound(*offset + values[i] * scaling_factor_inv));
    quantized_values[i] =
        std::min(kMaxScale, std::max(kMinScale, quantized_value));
  }
}
// Float matmul-accumulate: for every batch b and row r,
//   result[b * m_rows + r] += dot(matrix row r, vector batch slice b).
// `matrix` is row-major (m_rows x m_cols); `vector` holds n_batch
// contiguous slices of m_cols floats; `result` is accumulated in place.
void PortableMatrixBatchVectorMultiplyAccumulate(const float* matrix,
                                                 int m_rows, int m_cols,
                                                 const float* vector,
                                                 int n_batch, float* result) {
  for (int b = 0; b < n_batch; b++) {
    const float* batch_vector = vector + b * m_cols;
    for (int r = 0; r < m_rows; r++) {
      const float* row = matrix + r * m_cols;
      float sum = 0.0f;
      for (int c = 0; c < m_cols; c++) {
        sum += row[c] * batch_vector[c];
      }
      result[b * m_rows + r] += sum;
    }
  }
}
// Hybrid (int8 weights/activations, float output) matmul-accumulate.
// Each int8 dot product is accumulated in 32 bits and then rescaled to float
// with the per-batch scaling factor before being added into `result`.
void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result) {
  for (int batch = 0; batch < n_batch; ++batch) {
    const float scale = scaling_factors[batch];
    const int8_t* batch_vector = vectors + batch * m_cols;
    for (int row = 0; row < m_rows; ++row) {
      const int8_t* row_values = matrix + row * m_cols;
#if defined(__GNUC__)
      // Hint the upcoming row into cache before the dot product.
      __builtin_prefetch(row_values, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      int32_t dotprod = 0;
      for (int col = 0; col < m_cols; ++col) {
        dotprod += static_cast<int32_t>(row_values[col]) * batch_vector[col];
      }
      result[batch * m_rows + row] += dotprod * scale;
    }
  }
}
// int8 matmul-accumulate supporting an asymmetric input (per-batch zero
// point `input_offset`) and optional per-channel weight scales.
// The zero point is factored out of the inner loop using
//   sum_c (x_c - zp) * w_rc == sum_c x_c * w_rc - zp * row_sum_r,
// which is why cached per-row weight sums (`row_sums`) are maintained.
// `scratch` and `context` are not used by this portable implementation.
void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const int m_rows, const int m_cols,
    const int8_t* __restrict__ vectors, const float* scaling_factors,
    int n_batch, float* __restrict__ result, const float* per_channel_scale,
    const int32_t* input_offset, int32_t* scratch, int32_t* row_sums,
    bool* compute_row_sums, CpuBackendContext* context) {
  if (input_offset == nullptr) {
    // Symmetric input (no zero point): defer to the plain overload.
    PortableMatrixBatchVectorMultiplyAccumulate(
        matrix, m_rows, m_cols, vectors, scaling_factors, n_batch, result);
    return;
  }
  // Refresh the cached row sums when requested (or when no flag is given).
  if (!compute_row_sums || *compute_row_sums) {
    PortableReductionSumVector(matrix, row_sums, m_rows, m_cols);
    if (compute_row_sums) {
      *compute_row_sums = false;
    }
  }
  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
    const float batch_scaling_factor = scaling_factors[batch];
    const int32_t batch_offset = input_offset[batch];
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      int32_t dotprod = 0;
      float scale = batch_scaling_factor;
      if (per_channel_scale) {
        scale *= per_channel_scale[row];
      }
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      for (int col = 0; col < m_cols; ++col, ++row_ptr) {
        dotprod += (*row_ptr) * vectors[col];
      }  // for col
      // Remove the zero-point contribution for this batch (see note above).
      dotprod -= row_sums[row] * batch_offset;
      *result += dotprod * scale;
      ++result;
    }  // for row
  }  // for batch
}
// Sparse float matmul-accumulate in a block-compressed-sparse-row layout
// with a fixed 1x4 block size: `segments` delimits, per row, a span inside
// `indices`, and each index selects one 4-wide column block whose values are
// stored contiguously in `matrix`.
void PortableSparseMatrixBatchVectorMultiplyAccumulate1x4(
    const float* __restrict__ matrix, const int32_t* __restrict__ segments,
    const int32_t* __restrict__ indices, int m_rows, int m_cols,
    const float* __restrict__ vector, int n_batch, float* __restrict__ result) {
  const int kBlockSize = 4;
  TFLITE_DCHECK_EQ(m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    // The dense walk over the stored blocks restarts for every batch.
    const float* dense_ptr = matrix;
    const float* batch_vector = vector + batch * m_cols;
    for (int row = 0; row < m_rows; row++) {
      float acc = 0.0f;
      for (int i = segments[row]; i < segments[row + 1]; i++) {
        const float* block_vector = batch_vector + indices[i] * kBlockSize;
        for (int c = 0; c < kBlockSize; c++) {
          acc += *dense_ptr++ * block_vector[c];
        }
      }
      result[batch * m_rows + row] += acc;
    }
  }
}
// Sparse float matmul-accumulate driven by a "ledger": for each row the
// ledger stores the number of non-zero 16-wide column blocks followed by
// that many block indices. `matrix` stores only the non-zero blocks,
// contiguously in ledger order, so matrix_ptr advances exactly kBlockSize
// floats per stored block.
void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const float* __restrict__ matrix, const uint8_t* __restrict__ ledger,
    int m_rows, int m_cols, const float* __restrict__ vector, int n_batch,
    float* __restrict__ result) {
  const int kBlockSize = 16;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; batch++) {
    // Both sparse walks restart at the top of the data for every batch.
    const float* matrix_ptr = matrix;
    const uint8_t* ledger_ptr = ledger;
    for (int row = 0; row < m_rows; row++) {
      float dot_prod = 0.0f;
      int num_nonzero_blocks = *ledger_ptr++;
      if (num_nonzero_blocks > 0) {
        const float* vector_in_batch = vector + batch * m_cols;
        for (int i = 0; i < num_nonzero_blocks; i++) {
          // Each ledger entry indexes one 16-wide block of columns.
          const int block_start_index = *ledger_ptr++ * kBlockSize;
          const float* vector_block_in_batch_ptr =
              vector_in_batch + block_start_index;
          for (int c = 0; c < kBlockSize; c++) {
            dot_prod += *matrix_ptr++ * *vector_block_in_batch_ptr++;
          }
        }
      }
      result[batch * m_rows + row] += dot_prod;
    }
  }
}
// Quantized counterpart of the ledger-driven sparse kernel above: int8 dot
// products over the stored 16-wide blocks are accumulated in 32 bits, then
// scaled by the per-batch scaling factor into the float result.
void PortableSparseMatrixBatchVectorMultiplyAccumulate(
    const int8_t* __restrict__ matrix, const uint8_t* ledger, const int m_rows,
    const int m_cols, const int8_t* __restrict__ vectors,
    const float* scaling_factors, int n_batch, float* __restrict__ result) {
  static const int kBlockSize = 16;
  TFLITE_DCHECK_EQ(  // NOLINT
      m_cols % kBlockSize, 0);
  for (int batch = 0; batch < n_batch; ++batch, vectors += m_cols) {
    const float batch_scaling_factor = scaling_factors[batch];
    // The ledger walk restarts for every batch.
    const uint8_t* ledger_ptr = ledger;
    // Get the address of the first row.
    const int8_t* row_ptr = matrix;
    for (int row = 0; row < m_rows; ++row) {
      // Initialize the dot product sum for the row to 0.
      int32_t dotprod = 0;
#if defined(__GNUC__)
      // Prefetch the row to cache.
      __builtin_prefetch(row_ptr, 0 /* prefetch for read */,
                         3 /* temporal locality */);
#endif
      int num_nonzero_blocks = *ledger_ptr++;
      for (int i = 0; i < num_nonzero_blocks; i++) {
        const int block_start_index = *ledger_ptr++ * kBlockSize;
        const int8_t* vector_block_ptr = vectors + block_start_index;
        for (int c = 0; c < kBlockSize; c++) {
          dotprod += (*row_ptr++) * (*vector_block_ptr++);
        }  // for block
      }  // for num_nonzero_blocks
      result[batch * m_rows + row] += dotprod * batch_scaling_factor;
    }  // for row
  }  // for batch
}
// Shared implementation behind the two quantized matmul-accumulate wrappers
// below (T is int16_t or int8_t).
// For each (batch, row): acc = bias[row] + <input batch row, weight row>;
// the accumulator is then requantized with (multiplier, shift), offset by
// the output zero point, ADDED to the existing output value (accumulate
// semantics), and saturated to T's range.
template <typename T>
void PortableMatrixBatchVectorMultiplyAccumulateImpl(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    T* output) {
  // Saturation bounds of the output type; int16_t is wide enough for both
  // supported instantiations.
  const int16_t output_max = std::numeric_limits<T>::max();
  const int16_t output_min = std::numeric_limits<T>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_output; ++row) {
      int32_t acc = bias[row];
      for (int col = 0; col < n_input; ++col) {
        int8_t input_val = input[batch * n_input + col];
        int8_t weights_val = input_to_gate_weights[row * n_input + col];
        acc += input_val * weights_val;
      }
      acc = MultiplyByQuantizedMultiplier(acc, multiplier, shift);
      acc += output_zp;
      acc += output[batch * n_output + row];
      if (acc > output_max) {
        acc = output_max;
      }
      if (acc < output_min) {
        acc = output_min;
      }
      output[batch * n_output + row] = static_cast<T>(acc);
    }
  }
}
// int16-output entry point; thin wrapper over the shared Impl above.
// `scratch` and `context` are not used by this portable implementation.
void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int16_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulateImpl(
      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
      n_output, output_zp, output);
}
// int8-output entry point; thin wrapper over the shared Impl above.
// `scratch` and `context` are not used by this portable implementation.
void PortableMatrixBatchVectorMultiplyAccumulate(
    const int8_t* input, const int32_t* bias,
    const int8_t* input_to_gate_weights, int32_t multiplier, int32_t shift,
    int32_t n_batch, int32_t n_input, int32_t n_output, int32_t output_zp,
    int32_t* scratch, int8_t* output, CpuBackendContext* context) {
  PortableMatrixBatchVectorMultiplyAccumulateImpl(
      input, bias, input_to_gate_weights, multiplier, shift, n_batch, n_input,
      n_output, output_zp, output);
}
// Gate matmul for the quantized LSTM path:
//   gate_output = sat_int8(Requantize((input - zp) x W^T) + gate_output_zp)
// where Requantize applies (input_to_gate_effective_scale_a, _b).
void PortableMatrixBatchVectorMultiply(const int8_t* input,
                                       int32_t input_zeropoint,
                                       const int8_t* input_to_gate_weights,
                                       int32_t input_to_gate_effective_scale_a,
                                       int32_t input_to_gate_effective_scale_b,
                                       int32_t n_batch, int32_t n_input,
                                       int32_t n_cell, int8_t* gate_output,
                                       int8_t gate_output_zp) {
  const int32_t kInt8Max = std::numeric_limits<int8_t>::max();
  const int32_t kInt8Min = std::numeric_limits<int8_t>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    const int8_t* batch_input = input + batch * n_input;
    for (int row = 0; row < n_cell; ++row) {
      const int8_t* weights_row = input_to_gate_weights + row * n_input;
      int32_t acc = 0;
      // Accumulate with the input zero point removed from every element.
      for (int col = 0; col < n_input; ++col) {
        acc += (static_cast<int32_t>(batch_input[col]) - input_zeropoint) *
               weights_row[col];
      }
      // Rescale into the output domain and re-center on the output zero
      // point, then saturate to int8.
      acc = MultiplyByQuantizedMultiplier(acc, input_to_gate_effective_scale_a,
                                          input_to_gate_effective_scale_b);
      acc += gate_output_zp;
      acc = std::min(kInt8Max, std::max(kInt8Min, acc));
      gate_output[batch * n_cell + row] = static_cast<int8_t>(acc);
    }
  }
}
// Projection matmul for the quantized LSTM path:
//   proj_output = sat_int8(Requantize(hidden x W^T + gate_bias) + output_zp)
// where Requantize applies (proj_effective_scale_a, proj_effective_scale_b).
void PortableMatrixBatchVectorMultiply(
    const int16_t* hidden, const int8_t* hidden_to_output_weights,
    int32_t proj_effective_scale_a, int32_t proj_effective_scale_b,
    const int32_t* gate_bias, int32_t n_batch, int32_t n_hidden,
    int32_t n_output, int32_t output_zp, int8_t* proj_output) {
  const int16_t int8_max = std::numeric_limits<int8_t>::max();
  const int16_t int8_min = std::numeric_limits<int8_t>::min();
  for (int batch = 0; batch < n_batch; ++batch) {
    for (int row = 0; row < n_output; ++row) {
      int64_t acc = gate_bias[row];
      for (int col = 0; col < n_hidden; ++col) {
        int16_t input_val = hidden[batch * n_hidden + col];
        int8_t weights_val = hidden_to_output_weights[row * n_hidden + col];
        int64_t curr = acc;
        acc += input_val * weights_val;
        // Snap the accumulator to INT32_MAX/MIN on apparent wrap-around.
        // NOTE(review): with a 64-bit accumulator and int16*int8 products
        // these wrap-around conditions look unreachable; presumably carried
        // over from a 32-bit-accumulator version — confirm before relying
        // on this saturation.
        if (input_val * weights_val > 0 && acc < curr) {
          acc = std::numeric_limits<int32_t>::max();
        }
        if (input_val * weights_val < 0 && acc > curr) {
          acc = std::numeric_limits<int32_t>::min();
        }
      }
      acc = MultiplyByQuantizedMultiplier(acc, proj_effective_scale_a,
                                          proj_effective_scale_b);
      acc += output_zp;
      // Saturate to int8 before the narrowing store.
      if (acc > int8_max) {
        acc = int8_max;
      }
      if (acc < int8_min) {
        acc = int8_min;
      }
      proj_output[batch * n_output + row] = acc;
    }
  }
}
// Fixed-point layer normalization over int16 input, one normalization per
// batch row of n_input elements.
// The mean is kept at x1024 (Q10) resolution and the variance computation
// uses an extra 2^20 factor (kTwoToPower20) for precision.
// GetInvSqrtQuantizedMultiplierExp supplies 1/sqrt(variance) as a quantized
// multiplier; per element the normalized value is scaled by the layer-norm
// weights, offset by bias, rounded out of the x1024 domain, requantized with
// (layer_norm_scale_a, layer_norm_scale_b + 12), and saturated to int16.
void PortableApplyLayerNorm(const int16_t* input,
                            const int16_t* layer_norm_weights,
                            const int32_t* bias, int32_t layer_norm_scale_a,
                            int32_t layer_norm_scale_b, int32_t variance_limit,
                            int n_batch, int n_input, int16_t* output) {
  // The square of std::pow(2, 10), which is the extra factor that makes sure
  // normalized values has enough resolution.
  static const int kTwoToPower20 = 1 << 20;
  for (int i = 0; i < n_batch; ++i) {
    // First pass: raw sum and sum of squares for this batch row.
    int64_t sum = 0;
    int64_t sum_sq = 0;
    for (int j = 0; j < n_input; ++j) {
      const int32_t index = i * n_input + j;
      int32_t val = static_cast<int32_t>(input[index]);
      sum += val;
      sum_sq += val * val;
    }
    // Mean at x1024 resolution.
    int32_t mean =
        static_cast<int32_t>(static_cast<int64_t>(sum) * 1024 / n_input);
    // TODO(b/173994730): Avoids overflow but only works for POT n_input.
    int32_t temp = kTwoToPower20 / n_input;
    int64_t variance =
        sum_sq * temp - static_cast<int64_t>(mean) * static_cast<int64_t>(mean);
    int32_t variance2 = static_cast<int32_t>(variance / kTwoToPower20);
    if (variance2 < 1) {
      // Near-zero variance: fall back to the caller-provided lower bound.
      variance2 = variance_limit;
    }
    int32_t stddev_inverse_a;
    int stddev_inverse_b;
    GetInvSqrtQuantizedMultiplierExp(variance2, /*reverse_shift*/ -1,
                                     &stddev_inverse_a, &stddev_inverse_b);
    // Second pass: normalize, rescale, and saturate each element.
    for (int j = 0; j < n_input; ++j) {
      const int32_t index = i * n_input + j;
      int32_t val = static_cast<int32_t>(input[index]);
      // Centered value, still at x1024 resolution.
      int32_t shifted = 1024 * val - mean;
      int32_t rescaled = MultiplyByQuantizedMultiplier(
          shifted, stddev_inverse_a, stddev_inverse_b);
      // TODO(jianlijianli): Saturate this.
      int64_t val3 = rescaled * layer_norm_weights[j] + bias[j];
      // Round away the x1024 factor (round-half-away-from-zero).
      int32_t val4 =
          static_cast<int32_t>((val3 > 0 ? val3 + 512 : val3 - 512) / 1024);
      int32_t val5 = MultiplyByQuantizedMultiplier(val4, layer_norm_scale_a,
                                                   layer_norm_scale_b + 12);
      val5 = std::min(std::max(kInt16Min, val5), kInt16Max);
      output[index] = static_cast<int16_t>(val5);
    }
  }
}
// Float reference implementation of layer normalization for int16 tensors.
//
// Each batch row of `input` is normalized to zero mean / unit variance in
// float, scaled elementwise by `layer_norm_weights` (dequantized with
// layer_norm_scale_a * 2^(layer_norm_scale_b - 31)) plus `bias` (dequantized
// with an extra 2^-10 factor), then requantized with a 2^12 factor and
// clamped to the int16 range into `output`.
void PortableApplyLayerNormFloat(const int16_t* input,
                                 const int16_t* layer_norm_weights,
                                 int32_t layer_norm_scale_a,
                                 int32_t layer_norm_scale_b,
                                 const int32_t* bias, int n_batch, int n_input,
                                 int16_t* output) {
  const int32_t int16_max = std::numeric_limits<int16_t>::max();
  const int32_t int16_min = std::numeric_limits<int16_t>::min();
  // This is to suppress a lint warning.
  const double two = 2.0;
  // Real-valued scale of the layer-norm weights: a * 2^(b - 31).
  const float layer_norm_scale =
      layer_norm_scale_a *
      std::pow(two, static_cast<double>(layer_norm_scale_b - 31));
  const float bias_scale = std::pow(two, -10) * layer_norm_scale;
  for (int batch = 0; batch < n_batch; ++batch) {
    // First pass: accumulate sum and sum of squares for mean/variance.
    float sum = 0.0f;
    float sum_sq = 0.0f;
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float value = static_cast<float>(input[index]);
      sum += value;
      sum_sq += value * value;
    }
    const float mean = sum / n_input;
    float stddev_inv = 0.0f;
    const float variance = sum_sq / n_input - mean * mean;
    if (variance == 0) {
      // Guard against division by zero for constant rows.
      stddev_inv = 1.0f / sqrt(1e-8);
    } else {
      stddev_inv = 1.0f / sqrt(variance);
    }
    // Second pass: normalize, apply weights/bias, requantize to Q3.12.
    for (int i = 0; i < n_input; ++i) {
      const int index = batch * n_input + i;
      const float normalized_value =
          (static_cast<float>(input[index]) - mean) * stddev_inv;
      const float weighted_normalized_value =
          normalized_value * layer_norm_weights[i] * layer_norm_scale +
          bias[i] * bias_scale;
      // Requantize with a 2^12 factor and saturate to int16.
      const int32_t quant_output = static_cast<int32_t>(
          std::round(weighted_normalized_value * std::pow(2, 12)));
      output[index] = std::min(int16_max, std::max(int16_min, quant_output));
    }
  }
}
// Accumulates, for every row of `matrix`, scalar * (sum of the row's
// elements) into the corresponding entry of `output`.
void PortableMatrixScalarMultiplyAccumulate(const int8_t* matrix,
                                            int32_t scalar, int32_t n_row,
                                            int32_t n_col, int32_t* output) {
  for (int32_t r = 0; r < n_row; ++r) {
    const int8_t* row = matrix + r * n_col;
    int32_t acc = 0;
    for (int32_t c = 0; c < n_col; ++c) {
      acc += row[c];
    }
    output[r] += acc * scalar;
  }
}
void PortableApplySigmoid(const int16_t* input, int32_t n_batch,
int32_t n_input, int16_t* output) {
for (int batch = 0; batch < n_batch; ++batch) {
for (int c = 0; c < n_input; c++) {
using F3 = gemmlowp::FixedPoint<std::int16_t, 3>;
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
const int index = batch * n_input + c;
F3 sigmoid_input = F3::FromRaw(input[index]);
F0 sigmoid_output = gemmlowp::logistic(sigmoid_input);
output[index] = sigmoid_output.raw();
}
}
}
// Float reference sigmoid: dequantizes each int16 element from Q3.12,
// evaluates 1 / (1 + e^-x) in float, and requantizes the result to Q0.15,
// saturating to the int16 range.
void PortableApplySigmoidFloat(const int16_t* input, int32_t n_batch,
                               int32_t n_input, int16_t* output) {
  const int32_t kOutMax = std::numeric_limits<int16_t>::max();
  const int32_t kOutMin = std::numeric_limits<int16_t>::min();
  const int32_t total = n_batch * n_input;
  for (int32_t idx = 0; idx < total; ++idx) {
    const float x = input[idx] * std::pow(2, -12);
    const float sigmoid = 1.0f / (1.0f + std::exp(-x));
    int32_t quantized = static_cast<int32_t>(sigmoid * std::pow(2, 15));
    quantized = std::min(kOutMax, std::max(kOutMin, quantized));
    output[idx] = static_cast<int16_t>(quantized);
  }
}
template <int IntegerBits>
void PortableApplyTanhImpl(const int16_t* input, int32_t n_batch,
int32_t n_input, int16_t* output) {
using FX = gemmlowp::FixedPoint<std::int16_t, IntegerBits>;
using F0 = gemmlowp::FixedPoint<std::int16_t, 0>;
for (int batch = 0; batch < n_batch; ++batch) {
for (int i = 0; i < n_input; ++i) {
const int index = batch * n_input + i;
FX tanh_input = FX::FromRaw(input[index]);
F0 tanh_output = gemmlowp::tanh(tanh_input);
output[index] = tanh_output.raw();
}
}
}
// Dispatches to the fixed-point tanh specialization matching the runtime
// `integer_bits` value (0..6). Values outside that range are a no-op.
void PortableApplyTanh(int32_t integer_bits, const int16_t* input,
                       int32_t n_batch, int32_t n_input, int16_t* output) {
  assert(integer_bits <= 6);
  switch (integer_bits) {
    case 0:
      PortableApplyTanhImpl<0>(input, n_batch, n_input, output);
      break;
    case 1:
      PortableApplyTanhImpl<1>(input, n_batch, n_input, output);
      break;
    case 2:
      PortableApplyTanhImpl<2>(input, n_batch, n_input, output);
      break;
    case 3:
      PortableApplyTanhImpl<3>(input, n_batch, n_input, output);
      break;
    case 4:
      PortableApplyTanhImpl<4>(input, n_batch, n_input, output);
      break;
    case 5:
      PortableApplyTanhImpl<5>(input, n_batch, n_input, output);
      break;
    case 6:
      PortableApplyTanhImpl<6>(input, n_batch, n_input, output);
      break;
    default:
      return;
  }
}
// Float reference tanh: dequantizes each int16 element using `integer_bits`
// integer bits, evaluates std::tanh in float, and requantizes the result to
// Q0.15, saturating to the int16 range.
void PortableApplyTanhFloat(const int16_t* input, int32_t n_batch,
                            int32_t n_input, int32_t integer_bits,
                            int16_t* output) {
  const int32_t kOutMax = std::numeric_limits<int16_t>::max();
  const int32_t kOutMin = std::numeric_limits<int16_t>::min();
  // Double-typed base to avoid a lint warning on std::pow.
  const double two = 2.0;
  const int32_t total = n_batch * n_input;
  for (int32_t idx = 0; idx < total; ++idx) {
    const float x =
        input[idx] * std::pow(two, static_cast<double>(integer_bits));
    const float y = std::tanh(x);
    int32_t quantized = static_cast<int32_t>(y * std::pow(2, 15));
    quantized = std::min(kOutMax, std::max(kOutMin, quantized));
    output[idx] = static_cast<int16_t>(quantized);
  }
}
// Elementwise product of two int16 tensors; each 32-bit product is reduced
// back to int16 with a rounding right shift by `shift` bits.
void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int shift, int16_t* output) {
  const int total = n_batch * n_input;
  for (int idx = 0; idx < total; ++idx) {
    const int32_t product = static_cast<int32_t>(input_1[idx]) *
                            static_cast<int32_t>(input_2[idx]);
    output[idx] =
        static_cast<int16_t>(gemmlowp::RoundingDivideByPOT(product, shift));
  }
}
// Elementwise product of two int16 tensors rescaled by the quantized
// multiplier (multiplier, shift); `output_zp` is then subtracted and the
// result saturated to the int8 range.
void PortableCwiseMul(const int16_t* input_1, const int16_t* input_2,
                      int32_t multiplier, int32_t shift, int32_t n_batch,
                      int32_t n_input, int32_t output_zp, int8_t* output) {
  const int32_t total = n_batch * n_input;
  for (int32_t idx = 0; idx < total; ++idx) {
    int32_t prod = static_cast<int32_t>(input_1[idx]) *
                   static_cast<int32_t>(input_2[idx]);
    prod = MultiplyByQuantizedMultiplier(prod, multiplier, shift);
    prod -= output_zp;
    prod = std::max(static_cast<int32_t>(-128),
                    std::min(prod, static_cast<int32_t>(127)));
    output[idx] = static_cast<int8_t>(prod);
  }
}
// Elementwise saturating add of two int16 tensors: sums are computed in
// 32 bits and clamped to the int16 range before being stored.
void PortableCwiseAdd(const int16_t* input_1, const int16_t* input_2,
                      int n_batch, int n_input, int16_t* output) {
  const int total = n_batch * n_input;
  for (int idx = 0; idx < total; ++idx) {
    const int32_t sum = input_1[idx] + input_2[idx];
    output[idx] =
        static_cast<int16_t>(std::min(kInt16Max, std::max(kInt16Min, sum)));
  }
}
// Returns the inner product sum_i vector1[i] * vector2[i] of two float
// vectors of length v_size.
float PortableVectorVectorDotProduct(const float* vector1, const float* vector2,
                                     int v_size) {
  float accumulator = 0.0;
  for (int i = 0; i < v_size; ++i) {
    accumulator += vector1[i] * vector2[i];
  }
  return accumulator;
}
namespace {
// Returns the inner product of two int16 vectors of length v_size,
// accumulated in 32 bits.
inline int32_t VectorVectorDotProduct(const int16_t* vector1,
                                      const int16_t* vector2, int v_size) {
  int32_t accumulator = 0;
  for (int i = 0; i < v_size; ++i) {
    accumulator += static_cast<int32_t>(vector1[i]) * vector2[i];
  }
  return accumulator;
}
}  // namespace
// Computes one int32 dot product per batch: result[b] is the inner product
// of the b-th length-v_size slice of vector1 with the matching slice of
// vector2.
void PortableBatchVectorBatchVectorDotProduct(const int16_t* vector1,
                                              const int16_t* vector2,
                                              int v_size, int n_batch,
                                              int32_t* result) {
  for (int b = 0; b < n_batch; ++b) {
    const int16_t* v1 = vector1 + b * v_size;
    const int16_t* v2 = vector2 + b * v_size;
    int32_t dot = 0;
    for (int i = 0; i < v_size; ++i) {
      dot += static_cast<int32_t>(v1[i]) * v2[i];
    }
    result[b] = dot;
  }
}
// For every batch, multiplies `vector` elementwise with the batch's slice of
// `batch_vector`, rescales each product with the quantized multiplier
// (multiplier, shift), accumulates into `result`, and saturates to int16.
void PortableVectorBatchVectorCwiseProductAccumulate(
    const int16_t* vector, int v_size, const int16_t* batch_vector, int n_batch,
    int32_t multiplier, int shift, int16_t* result) {
  for (int b = 0; b < n_batch; ++b) {
    const int16_t* batch_ptr = batch_vector + b * v_size;
    int16_t* out_ptr = result + b * v_size;
    for (int v = 0; v < v_size; ++v) {
      const int32_t scaled = MultiplyByQuantizedMultiplier(
          vector[v] * batch_ptr[v], multiplier, shift);
      int32_t acc = scaled + out_ptr[v];
      acc = std::min(static_cast<int32_t>(32767),
                     std::max(static_cast<int32_t>(-32768), acc));
      out_ptr[v] = static_cast<int16_t>(acc);
    }
  }
}
// Computes result[i] = 1.0f - vector[i] for every element.
void PortableSub1Vector(const float* vector, int v_size, float* result) {
  for (int i = 0; i < v_size; ++i) {
    result[i] = 1.0f - vector[i];
  }
}
// Computes (1.0 - x) elementwise in Q0.15, where 1.0 is represented by the
// saturated value 32767.
void PortableSub1Vector(const int16_t* vector, int v_size, int16_t* result) {
  constexpr int16_t kOneQ15 = 32767;
  for (int i = 0; i < v_size; ++i) {
    result[i] = kOneQ15 - vector[i];
  }
}
// Dequantizes an int8 vector into float: result[i] = scale * vector[i].
void PortableVectorScalarMultiply(const int8_t* vector, const int v_size,
                                  const float scale, float* result) {
  for (int i = 0; i < v_size; ++i) {
    result[i] = scale * vector[i];
  }
}
// Normalizes each batch row of `input_vector` to zero mean and unit
// variance: output[i] = (input[i] - mean) / sqrt(variance + 1e-8). The
// epsilon keeps the division well defined for constant rows.
void PortableMeanStddevNormalization(const float* __restrict__ input_vector,
                                     float* __restrict__ output_vector,
                                     int v_size, int n_batch) {
  for (int b = 0; b < n_batch; ++b) {
    const float* in = input_vector + b * v_size;
    float* out = output_vector + b * v_size;
    // First pass: mean.
    float total = 0.0f;
    for (int i = 0; i < v_size; ++i) {
      total += in[i];
    }
    const float mean = total / v_size;
    // Second pass: variance as the mean of squared deviations.
    float sq_diff_total = 0.0f;
    for (int i = 0; i < v_size; ++i) {
      const float diff = in[i] - mean;
      sq_diff_total += diff * diff;
    }
    const float variance = sq_diff_total / v_size;
    constexpr float kNormalizationConstant = 1e-8f;
    const float inv_stddev =
        1.0f / std::sqrt(variance + kNormalizationConstant);
    // Third pass: normalize.
    for (int i = 0; i < v_size; ++i) {
      out[i] = (in[i] - mean) * inv_stddev;
    }
  }
}
// Adds the input gate and recurrent gate contributions elementwise: each
// int8 operand is centered on its zero point, rescaled by its quantized
// effective multiplier, summed, and saturated to the int16 output range.
void PortableTwoGateSaturatingAdd(const int8_t* input, int8_t input_zp,
                                  const int8_t* recurrent, int8_t recurrent_zp,
                                  int32_t input_effective_scale_a,
                                  int32_t input_effective_scale_b,
                                  int32_t recurrent_effective_scale_a,
                                  int32_t recurrent_effective_scale_b,
                                  int32_t n_batch, int32_t n_cell,
                                  int16_t* output) {
  const int32_t kOutMax = std::numeric_limits<int16_t>::max();
  const int32_t kOutMin = std::numeric_limits<int16_t>::min();
  const int32_t total = n_batch * n_cell;
  for (int32_t i = 0; i < total; ++i) {
    const int32_t input_centered =
        static_cast<int32_t>(input[i]) - static_cast<int32_t>(input_zp);
    const int32_t recurrent_centered =
        static_cast<int32_t>(recurrent[i]) - static_cast<int32_t>(recurrent_zp);
    const int32_t input_rescaled = MultiplyByQuantizedMultiplier(
        input_centered, input_effective_scale_a, input_effective_scale_b);
    const int32_t recurrent_rescaled = MultiplyByQuantizedMultiplier(
        recurrent_centered, recurrent_effective_scale_a,
        recurrent_effective_scale_b);
    int32_t sum = recurrent_rescaled + input_rescaled;
    sum = std::min(kOutMax, std::max(kOutMin, sum));
    output[i] = static_cast<int16_t>(sum);
  }
}
} // namespace tensor_utils
} // namespace tflite
| YifuLiu/AliOS-Things | components/ai_agent/src/engine/tflite-micro/tensorflow/lite/kernels/internal/reference/portable_tensor_utils.cc | C++ | apache-2.0 | 30,655 |