repo stringlengths 1-152 ⌀ | file stringlengths 14-221 | code stringlengths 501-25k | file_length int64 501-25k | avg_line_length float64 20-99.5 | max_line_length int64 21-134 | extension_type stringclasses 2 values |
|---|---|---|---|---|---|---|
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__scalar_x2(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vzero_point = params->scalar.zero_point;
const float vscale = params->scalar.scale;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
int32_t vx0 = (int32_t) input[0];
int32_t vx1 = (int32_t) input[1];
input += 2;
vx0 -= vzero_point;
vx1 -= vzero_point;
float vy0 = (float) vx0;
float vy1 = (float) vx1;
vy0 *= vscale;
vy1 *= vscale;
output[0] = vy0;
output[1] = vy1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vx = *input;
vx -= vzero_point;
float vy = (float) vx;
vy *= vscale;
*output = vy;
}
}
| 1,301 | 21.448276 | 76 | c |
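The scalar kernels above make the conversion formula explicit: each int8 value is widened to int32, the zero point is subtracted, the difference is cast to float, and the result is scaled. The sketch below is a minimal standalone reference (not part of XNNPACK; the function name and plain zero_point/scale parameters are illustrative) that computes the same y = (x - zero_point) * scale per element.

```c
#include <stddef.h>
#include <stdint.h>

// Hypothetical reference for what the xnn_qs8_f32_vcvt_ukernel__scalar_* kernels
// compute per element: y = (x - zero_point) * scale.
static void qs8_to_f32_reference(size_t n, const int8_t* x, float* y,
                                 int32_t zero_point, float scale) {
  for (size_t i = 0; i < n; i++) {
    y[i] = (float) ((int32_t) x[i] - zero_point) * scale;
  }
}
```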
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-scalar-x3.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__scalar_x3(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vzero_point = params->scalar.zero_point;
const float vscale = params->scalar.scale;
for (; batch >= 3 * sizeof(int8_t); batch -= 3 * sizeof(int8_t)) {
int32_t vx0 = (int32_t) input[0];
int32_t vx1 = (int32_t) input[1];
int32_t vx2 = (int32_t) input[2];
input += 3;
vx0 -= vzero_point;
vx1 -= vzero_point;
vx2 -= vzero_point;
float vy0 = (float) vx0;
float vy1 = (float) vx1;
float vy2 = (float) vx2;
vy0 *= vscale;
vy1 *= vscale;
vy2 *= vscale;
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output += 3;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vx = *input++;
vx -= vzero_point;
float vy = (float) vx;
vy *= vscale;
*output++ = vy;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 1,513 | 21.597015 | 76 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__scalar_x4(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vzero_point = params->scalar.zero_point;
const float vscale = params->scalar.scale;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
int32_t vx0 = (int32_t) input[0];
int32_t vx1 = (int32_t) input[1];
int32_t vx2 = (int32_t) input[2];
int32_t vx3 = (int32_t) input[3];
input += 4;
vx0 -= vzero_point;
vx1 -= vzero_point;
vx2 -= vzero_point;
vx3 -= vzero_point;
float vy0 = (float) vx0;
float vy1 = (float) vx1;
float vy2 = (float) vx2;
float vy3 = (float) vx3;
vy0 *= vscale;
vy1 *= vscale;
vy2 *= vscale;
vy3 *= vscale;
output[0] = vy0;
output[1] = vy1;
output[2] = vy2;
output[3] = vy3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vx = *input++;
vx -= vzero_point;
float vy = (float) vx;
vy *= vscale;
*output++ = vy;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 1,644 | 21.847222 | 76 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse2_x16(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
__m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
input += 16;
vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);
vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
__m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
__m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
__m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
__m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
input += 8;
__m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
__m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
vy_lo = _mm_mul_ps(vy_lo, vscale);
vy_hi = _mm_mul_ps(vy_hi, vscale);
_mm_storeu_ps(output, vy_lo);
_mm_storeu_ps(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
__m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_ps(output, vy);
vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 3,861 | 32.582609 | 90 | c |
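The SSE2 kernels avoid a 32-bit sign extension (which only arrives with SSE4.1) by building float bit patterns directly: the int8 lanes are XORed with a sign mask, zero-extended to 16 bits, and interleaved with a "magic exponent" so that each 32-bit lane becomes 2^23 + u; subtracting a magic bias then yields the centered integer as a float. The snippet below is a scalar demonstration of that bit trick under the assumption that the magic exponent encodes 2^23 (0x4B000000); the exact constants stored in the params struct are not shown in this dump.

```c
#include <stdint.h>
#include <stdio.h>
#include <string.h>

// Scalar illustration of the "magic bias" integer-to-float trick used by the
// SSE2 kernels (constants here are illustrative, not copied from XNNPACK params).
static float u8_to_float_magic(uint8_t u) {
  // Place u in the mantissa of the float 2^23: bits = 0x4B000000 | u.
  uint32_t bits = UINT32_C(0x4B000000) | (uint32_t) u;
  float f;
  memcpy(&f, &bits, sizeof(f));   // reinterpret the bits as an IEEE-754 float
  return f - 8388608.0f;          // subtract 2^23 to recover u exactly
}

int main(void) {
  // XOR-ing an int8 with 0x80 maps [-128, 127] onto [0, 255], so subtracting
  // (128 + zero_point) instead of 2^23 alone would also fold in the sign flip.
  for (int x = -128; x <= 127; x += 85) {
    uint8_t u = (uint8_t) (x ^ 0x80);
    printf("x=%4d  recovered=%g\n", x, (double) (u8_to_float_magic(u) - 128.0f));
  }
  return 0;
}
```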
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse2-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse2_x24(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
__m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
__m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
__m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input + 16));
input += 24;
vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);
vxGHIJKLMN = _mm_xor_si128(vxGHIJKLMN, vsign_mask);
vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
__m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
__m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
__m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
__m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
__m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
__m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
vyKLMN = _mm_mul_ps(vyKLMN, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
input += 8;
__m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
__m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
vy_lo = _mm_mul_ps(vy_lo, vscale);
vy_hi = _mm_mul_ps(vy_hi, vscale);
_mm_storeu_ps(output, vy_lo);
_mm_storeu_ps(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
__m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_ps(output, vy);
vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 4,463 | 34.428571 | 90 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse2_x32(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vx01234567 = _mm_loadl_epi64((const __m128i*) input);
__m128i vx89ABCDEF = _mm_loadl_epi64((const __m128i*) (input + 8));
__m128i vxGHIJKLMN = _mm_loadl_epi64((const __m128i*) (input + 16));
__m128i vxOPQRSTUV = _mm_loadl_epi64((const __m128i*) (input + 24));
input += 32;
vx01234567 = _mm_xor_si128(vx01234567, vsign_mask);
vx89ABCDEF = _mm_xor_si128(vx89ABCDEF, vsign_mask);
vxGHIJKLMN = _mm_xor_si128(vxGHIJKLMN, vsign_mask);
vxOPQRSTUV = _mm_xor_si128(vxOPQRSTUV, vsign_mask);
vx01234567 = _mm_unpacklo_epi8(vx01234567, vzero);
vx89ABCDEF = _mm_unpacklo_epi8(vx89ABCDEF, vzero);
vxGHIJKLMN = _mm_unpacklo_epi8(vxGHIJKLMN, vzero);
vxOPQRSTUV = _mm_unpacklo_epi8(vxOPQRSTUV, vzero);
__m128 vy0123 = _mm_castsi128_ps(_mm_unpacklo_epi16(vx01234567, vmagic_exp));
__m128 vy4567 = _mm_castsi128_ps(_mm_unpackhi_epi16(vx01234567, vmagic_exp));
__m128 vy89AB = _mm_castsi128_ps(_mm_unpacklo_epi16(vx89ABCDEF, vmagic_exp));
__m128 vyCDEF = _mm_castsi128_ps(_mm_unpackhi_epi16(vx89ABCDEF, vmagic_exp));
__m128 vyGHIJ = _mm_castsi128_ps(_mm_unpacklo_epi16(vxGHIJKLMN, vmagic_exp));
__m128 vyKLMN = _mm_castsi128_ps(_mm_unpackhi_epi16(vxGHIJKLMN, vmagic_exp));
__m128 vyOPQR = _mm_castsi128_ps(_mm_unpacklo_epi16(vxOPQRSTUV, vmagic_exp));
__m128 vySTUV = _mm_castsi128_ps(_mm_unpackhi_epi16(vxOPQRSTUV, vmagic_exp));
vy0123 = _mm_sub_ps(vy0123, vmagic_bias);
vy4567 = _mm_sub_ps(vy4567, vmagic_bias);
vy89AB = _mm_sub_ps(vy89AB, vmagic_bias);
vyCDEF = _mm_sub_ps(vyCDEF, vmagic_bias);
vyGHIJ = _mm_sub_ps(vyGHIJ, vmagic_bias);
vyKLMN = _mm_sub_ps(vyKLMN, vmagic_bias);
vyOPQR = _mm_sub_ps(vyOPQR, vmagic_bias);
vySTUV = _mm_sub_ps(vySTUV, vmagic_bias);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
vyKLMN = _mm_mul_ps(vyKLMN, vscale);
vyOPQR = _mm_mul_ps(vyOPQR, vscale);
vySTUV = _mm_mul_ps(vySTUV, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
_mm_storeu_ps(output + 24, vyOPQR);
_mm_storeu_ps(output + 28, vySTUV);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
input += 8;
__m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
__m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
vy_lo = _mm_mul_ps(vy_lo, vscale);
vy_hi = _mm_mul_ps(vy_hi, vscale);
_mm_storeu_ps(output, vy_lo);
_mm_storeu_ps(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
__m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_ps(output, vy);
vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 5,065 | 35.978102 | 90 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse2-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse2_x8(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
const __m128i vmagic_exp = _mm_load_si128((const __m128i*) params->sse2.magic_exp);
const __m128 vmagic_bias = _mm_load_ps(params->sse2.magic_bias);
const __m128 vscale = _mm_load_ps(params->sse2.scale);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
input += 8;
__m128 vy_lo = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
__m128 vy_hi = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy_lo = _mm_sub_ps(vy_lo, vmagic_bias);
vy_hi = _mm_sub_ps(vy_hi, vmagic_bias);
vy_lo = _mm_mul_ps(vy_lo, vscale);
vy_hi = _mm_mul_ps(vy_hi, vscale);
_mm_storeu_ps(output, vy_lo);
_mm_storeu_ps(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vx = _mm_loadl_epi64((const __m128i*) input);
vx = _mm_xor_si128(vx, vsign_mask);
vx = _mm_unpacklo_epi8(vx, vzero);
__m128 vy = _mm_castsi128_ps(_mm_unpacklo_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_ps(output, vy);
vy = _mm_castsi128_ps(_mm_unpackhi_epi16(vx, vmagic_exp));
vy = _mm_sub_ps(vy, vmagic_bias);
vy = _mm_mul_ps(vy, vscale);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 2,555 | 29.795181 | 90 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse41-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse41_x16(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
__m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
__m128i vx89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
__m128i vxCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
input += 16;
vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
__m128 vy0123 = _mm_cvtepi32_ps(vx0123);
__m128 vy4567 = _mm_cvtepi32_ps(vx4567);
__m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
__m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
input += 4;
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 3 * sizeof(int8_t));
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 2,977 | 31.725275 | 99 | c |
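The SSE4.1 kernels take a more direct route: four bytes are loaded as a 32-bit word, sign-extended to four int32 lanes with _mm_cvtepi8_epi32, offset by the negated zero point, converted with _mm_cvtepi32_ps, and scaled. Below is a small self-contained sketch of that sequence for one group of four elements; the memcpy-based load stands in for XNNPACK's unaligned_load_s32 helper, and the zero point and scale arguments are supplied by the caller rather than read from xnn params.

```c
#include <stdint.h>
#include <string.h>
#include <smmintrin.h>  // SSE4.1

// Convert four int8 values to float the way the sse41 kernels do (sketch only;
// zero_point and scale are caller-provided example values, not xnn params).
static void qs8x4_to_f32_sse41(const int8_t in[4], float out[4],
                               int32_t zero_point, float scale) {
  int32_t bits;
  memcpy(&bits, in, sizeof(bits));                          // unaligned 4-byte load
  __m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128(bits));  // sign-extend to int32
  vx = _mm_add_epi32(vx, _mm_set1_epi32(-zero_point));      // subtract zero point
  __m128 vy = _mm_mul_ps(_mm_cvtepi32_ps(vx), _mm_set1_ps(scale));
  _mm_storeu_ps(out, vy);
}
```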
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse41-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse41_x24(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
__m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
__m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
__m128i vx89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
__m128i vxCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
__m128i vxGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
__m128i vxKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
input += 24;
vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
__m128 vy0123 = _mm_cvtepi32_ps(vx0123);
__m128 vy4567 = _mm_cvtepi32_ps(vx4567);
__m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
__m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
__m128 vyGHIJ = _mm_cvtepi32_ps(vxGHIJ);
__m128 vyKLMN = _mm_cvtepi32_ps(vxKLMN);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
vyKLMN = _mm_mul_ps(vyKLMN, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
input += 4;
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 3 * sizeof(int8_t));
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 3,533 | 33.990099 | 99 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse41-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse41_x32(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
__m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
__m128i vx89AB = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 8)));
__m128i vxCDEF = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 12)));
__m128i vxGHIJ = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 16)));
__m128i vxKLMN = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 20)));
__m128i vxOPQR = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 24)));
__m128i vxSTUV = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 28)));
input += 32;
vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
vx89AB = _mm_add_epi32(vx89AB, vminus_zero_point);
vxCDEF = _mm_add_epi32(vxCDEF, vminus_zero_point);
vxGHIJ = _mm_add_epi32(vxGHIJ, vminus_zero_point);
vxKLMN = _mm_add_epi32(vxKLMN, vminus_zero_point);
vxOPQR = _mm_add_epi32(vxOPQR, vminus_zero_point);
vxSTUV = _mm_add_epi32(vxSTUV, vminus_zero_point);
__m128 vy0123 = _mm_cvtepi32_ps(vx0123);
__m128 vy4567 = _mm_cvtepi32_ps(vx4567);
__m128 vy89AB = _mm_cvtepi32_ps(vx89AB);
__m128 vyCDEF = _mm_cvtepi32_ps(vxCDEF);
__m128 vyGHIJ = _mm_cvtepi32_ps(vxGHIJ);
__m128 vyKLMN = _mm_cvtepi32_ps(vxKLMN);
__m128 vyOPQR = _mm_cvtepi32_ps(vxOPQR);
__m128 vySTUV = _mm_cvtepi32_ps(vxSTUV);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
vy89AB = _mm_mul_ps(vy89AB, vscale);
vyCDEF = _mm_mul_ps(vyCDEF, vscale);
vyGHIJ = _mm_mul_ps(vyGHIJ, vscale);
vyKLMN = _mm_mul_ps(vyKLMN, vscale);
vyOPQR = _mm_mul_ps(vyOPQR, vscale);
vySTUV = _mm_mul_ps(vySTUV, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
_mm_storeu_ps(output + 8, vy89AB);
_mm_storeu_ps(output + 12, vyCDEF);
_mm_storeu_ps(output + 16, vyGHIJ);
_mm_storeu_ps(output + 20, vyKLMN);
_mm_storeu_ps(output + 24, vyOPQR);
_mm_storeu_ps(output + 28, vySTUV);
output += 32;
}
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
input += 4;
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 3 * sizeof(int8_t));
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 4,089 | 35.846847 | 99 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-sse41-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__sse41_x8(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vminus_zero_point = _mm_load_si128((const __m128i*) params->sse4.minus_zero_point);
const __m128 vscale = _mm_load_ps(params->sse4.scale);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vx0123 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
__m128i vx4567 = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input + 4)));
input += 8;
vx0123 = _mm_add_epi32(vx0123, vminus_zero_point);
vx4567 = _mm_add_epi32(vx4567, vminus_zero_point);
__m128 vy0123 = _mm_cvtepi32_ps(vx0123);
__m128 vy4567 = _mm_cvtepi32_ps(vx4567);
vy0123 = _mm_mul_ps(vy0123, vscale);
vy4567 = _mm_mul_ps(vy4567, vscale);
_mm_storeu_ps(output, vy0123);
_mm_storeu_ps(output + 4, vy4567);
output += 8;
}
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
input += 4;
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
_mm_storeu_ps(output, vy);
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 3 * sizeof(int8_t));
__m128i vx = _mm_cvtepi8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input)));
vx = _mm_add_epi32(vx, vminus_zero_point);
__m128 vy = _mm_cvtepi32_ps(vx);
vy = _mm_mul_ps(vy, vscale);
if (batch & (2 * sizeof(int8_t))) {
_mm_storel_pi((__m64*) output, vy);
vy = _mm_movehl_ps(vy, vy);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
_mm_store_ss(output, vy);
}
}
}
| 2,418 | 28.864198 | 99 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__wasmsimd_x16(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vx01234567 = wasm_i16x8_load8x8(input);
v128_t vx89ABCDEF = wasm_i16x8_load8x8(input + 8);
input += 16;
vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
vy0123 = wasm_f32x4_convert_i32x4(vy0123);
vy4567 = wasm_f32x4_convert_i32x4(vy4567);
vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
vy0123 = wasm_f32x4_mul(vy0123, vscale);
vy4567 = wasm_f32x4_mul(vy4567, vscale);
vy89AB = wasm_f32x4_mul(vy89AB, vscale);
vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
vy_lo = wasm_f32x4_mul(vy_lo, vscale);
vy_hi = wasm_f32x4_mul(vy_hi, vscale);
wasm_v128_store(output, vy_lo);
wasm_v128_store(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store(output, vy); output += 4;
vy = wasm_i32x4_extend_high_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 3,405 | 30.831776 | 93 | c |
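On WebAssembly SIMD, the kernels fold the zero-point subtraction into a single 16-bit add: wasm_i16x8_load8x8 sign-extends eight int8 values into int16 lanes, the per-lane minus_zero_point is added, and the two halves are widened to int32 and converted to float. The sketch below isolates one eight-element step using the same intrinsics as the kernel; the zero point (3) and scale (0.5f) are made-up example values.

```c
#include <stdint.h>
#include <wasm_simd128.h>

// One 8-element qs8 -> f32 step, mirroring the wasmsimd kernel's main loop
// (standalone sketch; zero point and scale are assumed example values).
static void qs8x8_to_f32_wasmsimd(const int8_t* in, float* out) {
  const v128_t vminus_zero_point = wasm_i16x8_splat(-3);    // assumed zero point 3
  const v128_t vscale = wasm_f32x4_splat(0.5f);             // assumed scale
  v128_t vx = wasm_i16x8_load8x8(in);                       // int8 -> int16 lanes
  vx = wasm_i16x8_add(vx, vminus_zero_point);
  v128_t vy_lo = wasm_f32x4_convert_i32x4(wasm_i32x4_extend_low_i16x8(vx));
  v128_t vy_hi = wasm_f32x4_convert_i32x4(wasm_i32x4_extend_high_i16x8(vx));
  wasm_v128_store(out, wasm_f32x4_mul(vy_lo, vscale));
  wasm_v128_store(out + 4, wasm_f32x4_mul(vy_hi, vscale));
}
```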
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-wasmsimd-x24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__wasmsimd_x24(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
for (; batch >= 24 * sizeof(int8_t); batch -= 24 * sizeof(int8_t)) {
v128_t vx01234567 = wasm_i16x8_load8x8(input);
v128_t vx89ABCDEF = wasm_i16x8_load8x8(input + 8);
v128_t vxGHIJKLMN = wasm_i16x8_load8x8(input + 16);
input += 24;
vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
vxGHIJKLMN = wasm_i16x8_add(vxGHIJKLMN, vminus_zero_point);
v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
v128_t vyGHIJ = wasm_i32x4_extend_low_i16x8(vxGHIJKLMN);
v128_t vyKLMN = wasm_i32x4_extend_high_i16x8(vxGHIJKLMN);
vy0123 = wasm_f32x4_convert_i32x4(vy0123);
vy4567 = wasm_f32x4_convert_i32x4(vy4567);
vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
vyGHIJ = wasm_f32x4_convert_i32x4(vyGHIJ);
vyKLMN = wasm_f32x4_convert_i32x4(vyKLMN);
vy0123 = wasm_f32x4_mul(vy0123, vscale);
vy4567 = wasm_f32x4_mul(vy4567, vscale);
vy89AB = wasm_f32x4_mul(vy89AB, vscale);
vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
vyGHIJ = wasm_f32x4_mul(vyGHIJ, vscale);
vyKLMN = wasm_f32x4_mul(vyKLMN, vscale);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
output += 24;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
vy_lo = wasm_f32x4_mul(vy_lo, vscale);
vy_hi = wasm_f32x4_mul(vy_hi, vscale);
wasm_v128_store(output, vy_lo);
wasm_v128_store(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store(output, vy); output += 4;
vy = wasm_i32x4_extend_high_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 3,916 | 32.478632 | 93 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-wasmsimd-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__wasmsimd_x32(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vx01234567 = wasm_i16x8_load8x8(input);
v128_t vx89ABCDEF = wasm_i16x8_load8x8(input + 8);
v128_t vxGHIJKLMN = wasm_i16x8_load8x8(input + 16);
v128_t vxOPQRSTUV = wasm_i16x8_load8x8(input + 24);
input += 32;
vx01234567 = wasm_i16x8_add(vx01234567, vminus_zero_point);
vx89ABCDEF = wasm_i16x8_add(vx89ABCDEF, vminus_zero_point);
vxGHIJKLMN = wasm_i16x8_add(vxGHIJKLMN, vminus_zero_point);
vxOPQRSTUV = wasm_i16x8_add(vxOPQRSTUV, vminus_zero_point);
v128_t vy0123 = wasm_i32x4_extend_low_i16x8(vx01234567);
v128_t vy4567 = wasm_i32x4_extend_high_i16x8(vx01234567);
v128_t vy89AB = wasm_i32x4_extend_low_i16x8(vx89ABCDEF);
v128_t vyCDEF = wasm_i32x4_extend_high_i16x8(vx89ABCDEF);
v128_t vyGHIJ = wasm_i32x4_extend_low_i16x8(vxGHIJKLMN);
v128_t vyKLMN = wasm_i32x4_extend_high_i16x8(vxGHIJKLMN);
v128_t vyOPQR = wasm_i32x4_extend_low_i16x8(vxOPQRSTUV);
v128_t vySTUV = wasm_i32x4_extend_high_i16x8(vxOPQRSTUV);
vy0123 = wasm_f32x4_convert_i32x4(vy0123);
vy4567 = wasm_f32x4_convert_i32x4(vy4567);
vy89AB = wasm_f32x4_convert_i32x4(vy89AB);
vyCDEF = wasm_f32x4_convert_i32x4(vyCDEF);
vyGHIJ = wasm_f32x4_convert_i32x4(vyGHIJ);
vyKLMN = wasm_f32x4_convert_i32x4(vyKLMN);
vyOPQR = wasm_f32x4_convert_i32x4(vyOPQR);
vySTUV = wasm_f32x4_convert_i32x4(vySTUV);
vy0123 = wasm_f32x4_mul(vy0123, vscale);
vy4567 = wasm_f32x4_mul(vy4567, vscale);
vy89AB = wasm_f32x4_mul(vy89AB, vscale);
vyCDEF = wasm_f32x4_mul(vyCDEF, vscale);
vyGHIJ = wasm_f32x4_mul(vyGHIJ, vscale);
vyKLMN = wasm_f32x4_mul(vyKLMN, vscale);
vyOPQR = wasm_f32x4_mul(vyOPQR, vscale);
vySTUV = wasm_f32x4_mul(vySTUV, vscale);
wasm_v128_store(output, vy0123);
wasm_v128_store(output + 4, vy4567);
wasm_v128_store(output + 8, vy89AB);
wasm_v128_store(output + 12, vyCDEF);
wasm_v128_store(output + 16, vyGHIJ);
wasm_v128_store(output + 20, vyKLMN);
wasm_v128_store(output + 24, vyOPQR);
wasm_v128_store(output + 28, vySTUV);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
vy_lo = wasm_f32x4_mul(vy_lo, vscale);
vy_hi = wasm_f32x4_mul(vy_hi, vscale);
wasm_v128_store(output, vy_lo);
wasm_v128_store(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store(output, vy); output += 4;
vy = wasm_i32x4_extend_high_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 4,427 | 33.866142 | 93 | c |
XNNPACK | XNNPACK-master/src/qs8-f32-vcvt/gen/qs8-f32-vcvt-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-f32-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_f32_vcvt_ukernel__wasmsimd_x8(
size_t batch,
const int8_t* input,
float* output,
const union xnn_qs8_f32_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vminus_zero_point = wasm_v128_load64_splat(params->wasmsimd.minus_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->wasmsimd.scale);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy_lo = wasm_i32x4_extend_low_i16x8(vx);
v128_t vy_hi = wasm_i32x4_extend_high_i16x8(vx);
vy_lo = wasm_f32x4_convert_i32x4(vy_lo);
vy_hi = wasm_f32x4_convert_i32x4(vy_hi);
vy_lo = wasm_f32x4_mul(vy_lo, vscale);
vy_hi = wasm_f32x4_mul(vy_hi, vscale);
wasm_v128_store(output, vy_lo);
wasm_v128_store(output + 4, vy_hi);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vx = wasm_i16x8_load8x8(input);
vx = wasm_i16x8_add(vx, vminus_zero_point);
input += 8;
v128_t vy = wasm_i32x4_extend_low_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store(output, vy); output += 4;
vy = wasm_i32x4_extend_high_i16x8(vx);
vy = wasm_f32x4_convert_i32x4(vy);
vy = wasm_f32x4_mul(vy, vscale);
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vy, 0);
vy = wasm_v64x2_shuffle(vy, vy, 1, 1);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
}
}
}
| 2,280 | 28.24359 | 93 | c |
XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neon-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| file_length: 13,920 | avg_line_length: 43.053797 | max_line_length: 123 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neon-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
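// First pass: sum the first 7 rows per channel. int8 inputs are widened to int16
// pairwise sums, then to int32 with init_bias added, and the 32-bit partial sums
// are spilled to the scratch buffer, 24 channels (three int8x8 groups) at a time.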
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
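// Intermediate passes: fold 7 more rows at a time into the 32-bit partial sums
// already in the scratch buffer; input_increment steps each row pointer forward to
// its next block of 7 rows, undoing the per-channel advance from the previous pass.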
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
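// Final pass: at most 7 rows remain. Row pointers beyond the remaining count are
// redirected to the zero vector so the same 7-row accumulation can be reused.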
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
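// Requantize: adding the magic bias (1.5 * 2**23) leaves the rounded integer in the
// low mantissa bits of the float; the saturating vqsubq_s32 below strips the bias and
// folds in the output zero point in a single step.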
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
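// Channel remainder of the final pass: requantize the last 1-23 channels in groups
// of 8 and store the final, incomplete group as 4-/2-/1-lane pieces.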
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| file_length: 20,213 | avg_line_length: 45.150685 | max_line_length: 123 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neon-c32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
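// Same three-stage (first / intermediate / final pass) scheme as the narrower kernels,
// but with a 32-channel tile (four int8x8 groups) per iteration.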
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
const int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
const int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(b + 24);
int32x4_t vaccSTUV = vld1q_s32(b + 28);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccSTUV = vld1q_s32(buffer); buffer += 4;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
vaccOPQR = vreinterpretq_s32_f32(vaddq_f32(vfpaccOPQR, vmagic_bias));
vaccSTUV = vreinterpretq_s32_f32(vaddq_f32(vfpaccSTUV, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
vaccOPQR = vqsubq_s32(vaccOPQR, vmagic_bias_less_output_zero_point);
vaccSTUV = vqsubq_s32(vaccSTUV, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| file_length: 24,025 | avg_line_length: 46.956088 | max_line_length: 123 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neon-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neon_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
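// 8-channel tile: each pass handles a single int8x8 group, so there is no inner
// remainder loop; doz() (difference-or-zero) clamps the channel counter so the
// padded last group does not underflow.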
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
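// At most 7 channels remain: one final (possibly over-reading, see XNN_OOB_READS)
// 8-wide load is requantized and stored in 4-/2-/1-lane pieces.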
if XNN_UNLIKELY(channels != 0) {
{
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vi2x01234567 = vld1_s8(i2);
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4);
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6);
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| file_length: 9,766 | avg_line_length: 38.54251 | max_line_length: 123 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neonv8-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
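// Explanatory note (not part of the generated kernel above): this 7p7x multipass layout
// averages over an arbitrary number of rows in three stages. The first pass sums rows 0-6
// per channel into the int32 'buffer', seeded with 'init_bias'; each middle pass folds
// seven more rows into that buffer; the final pass adds the last 1-7 rows (redirecting
// exhausted row pointers to 'zero') and requantizes: int32 to float, multiply by 'scale'
// (typically the reciprocal of the pooling size), round to nearest-even with
// vcvtnq_s32_f32, add the output zero point, and clamp to [output_min, output_max]
// before narrowing to int8.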
| 13,428
| 42.180064
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neonv8-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
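// Explanatory note (not part of the generated kernel above): channels are processed in
// tiles of 24 (three int8x8_t registers); the remainder loops work in groups of 8 and rely
// on XNN_OOB_READS together with round_up_po2(channels, 8) in 'input_increment', so loading
// a full 8-byte vector past the last valid channel is intentional. Padding lanes may be
// carried through the int32 buffer, but the output stores write at most 'channels' bytes.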
| 19,578
| 44.321759
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neonv8-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
const int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
const int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(b + 24);
int32x4_t vaccSTUV = vld1q_s32(b + 28);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccSTUV = vld1q_s32(buffer); buffer += 4;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
vaccOPQR = vcvtnq_s32_f32(vfpaccOPQR);
vaccSTUV = vcvtnq_s32_f32(vfpaccSTUV);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
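// Illustrative sketch (not part of the generated kernel above): a scalar reference for the
// fp32 requantization performed with vcvtnq_s32_f32/vqmovn/vqaddq_s16 in the final pass.
// The helper name is hypothetical; it assumes the default FE_TONEAREST rounding mode,
// <math.h> and <stdint.h>, and it skips the intermediate int16 saturation steps, which the
// final clamp makes irrelevant for in-range outputs.
static inline int8_t qs8_requantize_fp32_ref(
    int32_t acc, float scale, int16_t output_zero_point,
    int8_t output_min, int8_t output_max)
{
  const float fpacc = (float) acc * scale;  // apply the averaging scale
  long n = lrintf(fpacc);                   // round to nearest, ties to even
  n += (long) output_zero_point;            // shift into the quantized output range
  if (n < (long) output_min) { n = (long) output_min; }  // clamp to the output range
  if (n > (long) output_max) { n = (long) output_max; }
  return (int8_t) n;
}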
| 23,247
| 46.060729
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-neonv8-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__neonv8_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
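  // Note (explanatory comment): with an 8-channel tile the first pass needs no separate
  // remainder loop; 'c = doz(c, 8)' saturates at zero, so the last group may read past
  // 'channels' (covered by XNN_OOB_READS) while still writing a full 8-wide buffer group.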
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vi2x01234567 = vld1_s8(i2);
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4);
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6);
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
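// Explanatory note (not part of the generated kernel above): when fewer than 8 channels
// remain, the kernel still computes a full int8x8_t result and then stores only the valid
// lanes: a 4-byte lane store for (channels & 4), a 2-byte store for (channels & 2), and a
// single byte for (channels & 1), rotating the vector with vext_s8 between stores so the
// next chunk always sits in lane 0.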
| 9,417
| 37.757202
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
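  // Requantization via the float magic-bias trick: after clamping, adding the magic bias
  // rounds the scaled average to an integer held in the low mantissa bits of the float, so
  // reinterpreting the bits and subtracting magic_bias_less_output_zero_point leaves the
  // rounded value plus the output zero point.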
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 4,875
| 30.25641
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
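// 2-channels-per-iteration variant of the fmagic multipass kernel: accumulation and
// requantization are unrolled by two, and a scalar tail after the main output loop handles
// an odd trailing channel.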
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
for (; channels >= 2; channels -= 2) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
buffer += 2;
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = *buffer;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (int8_t) vout;
}
}
| 7,945
| 29.328244
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-fmagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
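// 4-channels-per-iteration variant of the fmagic multipass kernel: the accumulation loops
// advance four channels at a time, and a scalar do/while tail requantizes the last
// 1-3 channels.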
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_fmagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = b[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = b[3];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
for (; channels >= 4; channels -= 4) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = buffer[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = buffer[3];
const int32_t vi0x3 = (int32_t) i0[3];
buffer += 4;
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 11,639
| 30.630435
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
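// Same multipass structure as the fmagic kernel, but requantization uses the integer
// magic-bias ("imagic") variant: the output clamp is applied to the biased integer bit
// pattern instead of to the float value.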
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
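  // Convert the scaled average with the magic-bias trick, clamp the resulting biased
  // integer against magic_min/magic_max (clamp bounds supplied pre-biased in the params
  // struct), then subtract magic_bias_less_zero_point to obtain the zero-point-adjusted
  // int8 value.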
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 4,759
| 29.318471
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
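// 2-channels-per-iteration variant of the imagic multipass kernel; a scalar tail after the
// main output loop handles an odd trailing channel.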
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
for (; channels >= 2; channels -= 2) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
buffer += 2;
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = *buffer;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output = (int8_t) vout;
}
}
| 7,756
| 28.161654
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-imagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
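// 4-channels-per-iteration variant of the imagic multipass kernel; a scalar do/while tail
// requantizes the last 1-3 channels.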
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = b[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = b[3];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
for (; channels >= 4; channels -= 4) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = buffer[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = buffer[3];
const int32_t vi0x3 = (int32_t) i0[3];
buffer += 4;
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 11,378
| 29.425134
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
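// Same multipass structure again, with requantization done portably: the scaled average is
// clamped in float and rounded to the nearest integer with lrintf() before the output zero
// point is added.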
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 1) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
*b++ = vacc;
} while (--c != 0);
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
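  // Final pass: scale the accumulated sum, clamp against the zero-point-adjusted output
  // range, round with lrintf(), and shift by the output zero point.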
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 4,779
| 29.641026
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
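// 2-channels-per-iteration variant of the lrintf multipass kernel; a scalar tail after the
// main output loop handles an odd trailing channel.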
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 2) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 2) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
b[0] = vacc0;
b[1] = vacc1;
b += 2;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
for (; channels >= 2; channels -= 2) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
buffer += 2;
i0 += 2;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = vrndacc0 + voutput_zero_point;
int32_t vout1 = vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = *buffer;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output = (int8_t) vout;
}
}
| 7,821
| 28.854962
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-scalar-lrintf-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
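// 4-channels-per-iteration variant of the lrintf multipass kernel: accumulation and
// requantization are unrolled by four channels.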
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__scalar_lrintf_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 4) * sizeof(int8_t);
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
for (ptrdiff_t c = (ptrdiff_t) channels; c > 0; c -= 4) {
int32_t vacc0 = b[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = b[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = b[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = b[3];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
b[0] = vacc0;
b[1] = vacc1;
b[2] = vacc2;
b[3] = vacc3;
b += 4;
}
}
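  // Final pass: between 1 and 7 rows remain. Row pointers past the remaining
  // count are redirected to the `zero` vector so they contribute nothing.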
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
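  // fp32/lrintf requantization: scale the integer sum, clamp in float to the
  // output range minus the zero point, round with lrintf(), then add the
  // output zero point before narrowing to int8. The init_bias term was already
  // folded into the accumulator during the first pass.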
for (; channels >= 4; channels -= 4) {
int32_t vacc0 = buffer[0];
const int32_t vi0x0 = (int32_t) i0[0];
int32_t vacc1 = buffer[1];
const int32_t vi0x1 = (int32_t) i0[1];
int32_t vacc2 = buffer[2];
const int32_t vi0x2 = (int32_t) i0[2];
int32_t vacc3 = buffer[3];
const int32_t vi0x3 = (int32_t) i0[3];
buffer += 4;
i0 += 4;
vacc0 += vi0x0;
const int32_t vi1x0 = (int32_t) i1[0];
vacc1 += vi0x1;
const int32_t vi1x1 = (int32_t) i1[1];
vacc2 += vi0x2;
const int32_t vi1x2 = (int32_t) i1[2];
vacc3 += vi0x3;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = vrndacc0 + voutput_zero_point;
int32_t vout1 = vrndacc1 + voutput_zero_point;
int32_t vout2 = vrndacc2 + voutput_zero_point;
int32_t vout3 = vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
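  // Remainder: the last 1-3 channels go through the same accumulate / scale /
  // clamp / round sequence one channel at a time.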
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = *buffer++;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 11,487 | 30.217391 | 98 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-sse2-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
int32_t* b = buffer;
size_t c = channels;
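  // First pass: sum the first 7 rows, 16 channels per iteration, and seed the
  // int32 accumulators in `buffer` with init_bias. Rows are first summed in
  // int16 (7 int8 values cannot overflow int16) and then widened to int32.
  // The trailing group may read past `channels`; the kernel is XNN_OOB_READS.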
for (; c != 0; c = doz(c, 16)) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
i0 += 16;
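    // SSE2 lacks pmovsxbw: each int8 lane is sign-extended to int16 by
    // duplicating the byte with _mm_unpacklo_epi8 and arithmetic-shifting
    // the 16-bit lanes right by 8.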
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
i1 += 16;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
i2 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
b += 16;
}
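  // Middle passes: each further group of 7 rows is accumulated onto the int32
  // partial sums already stored in `buffer`.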
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
i0 += 16;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
i1 += 16;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
i2 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
b += 16;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
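  // Final pass: add the last 1-7 rows (missing rows are read from `zero`),
  // then requantize the sums and store int8 outputs.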
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
for (; channels >= 16; channels -= 16) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
i0 += 16;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
i1 += 16;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
i2 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
buffer += 16;
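    // fp32 requantization: convert to float, scale, clamp the upper bound in
    // float, and let _mm_cvtps_epi32 round (nearest-even under the default
    // rounding mode). The lower bound is enforced after packing via
    // _mm_max_epi16 with output_min; _mm_adds_epi16 saturates while adding
    // the output zero point.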
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
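  // Remainder: the last 1-15 channels are handled in 8-wide groups; the final
  // partial group is written with 4-, 2- and 1-byte tail stores.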
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (int8_t) vout0123;
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 21,082 | 47.690531 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-sse2-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse2_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
int32_t* b = buffer;
size_t c = channels;
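  // Same multipass structure as the c16 variant, but with an 8-channel tile:
  // the first pass seeds `buffer` with 7-row sums plus init_bias.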
for (; c != 0; c = doz(c, 8)) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
for (; channels >= 8; channels -= 8) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
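  // Remainder: at most 7 channels are left, so one more 8-wide pass (over-reads
  // are permitted) followed by 4-, 2- and 1-byte tail stores is sufficient.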
if XNN_UNLIKELY(channels != 0) {
{
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (int8_t) vout0123;
}
}
}
}
| 13,902 | 40.377976 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-sse41-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
int32_t* b = buffer;
size_t c = channels;
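  // SSE4.1 variant: int8 lanes are sign-extended directly with
  // _mm_cvtepi8_epi16 (pmovsxbw) instead of the SSE2 unpack+shift idiom.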
for (; c != 0; c = doz(c, 16)) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
i0 += 16;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
i1 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
i2 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
b += 16;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
i0 += 16;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
i1 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
i2 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
b += 16;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
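  // Final pass with fp32 requantization. Unlike the SSE2 variant, the
  // output_min clamp is applied after the final int8 pack using _mm_max_epi8,
  // which is available in SSE4.1.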
for (; channels >= 16; channels -= 16) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
i0 += 16;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
i1 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
i2 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
buffer += 16;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
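  // Remainder: the last channels are processed 8 at a time, with the final
  // partial group stored via 4-, 2- and 1-byte extracts.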
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 16,477 | 46.2149 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-sse41-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
int32_t* b = buffer;
size_t c = channels;
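  // c24 variant: the main loops cover 24 channels per iteration; leftover
  // channels fall back to 8-wide groups, and input_increment rounds the
  // channel count up to a multiple of 8.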
for (; c >= 24; c -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
__m128i vaccGHIJ = _mm_cvtepi16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_srai_epi32(_mm_unpackhi_epi16(vaccGHIJKLMN, vaccGHIJKLMN), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
_mm_store_si128((__m128i*) (b + 16), vaccGHIJ);
_mm_store_si128((__m128i*) (b + 20), vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
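  // Intermediate passes: add 7 more rows into the existing buffer contents per pass until at most 7 rows remain.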
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
__m128i vaccGHIJ = _mm_cvtepi16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_srai_epi32(_mm_unpackhi_epi16(vaccGHIJKLMN, vaccGHIJKLMN), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (b + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (b + 12)));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_load_si128((const __m128i*) (b + 16)));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_load_si128((const __m128i*) (b + 20)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
_mm_store_si128((__m128i*) (b + 8), vacc89AB);
_mm_store_si128((__m128i*) (b + 12), vaccCDEF);
_mm_store_si128((__m128i*) (b + 16), vaccGHIJ);
_mm_store_si128((__m128i*) (b + 20), vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
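  // Final pass: add the last 1-7 rows (rows beyond the remainder read from the zero buffer), requantize in fp32, and store int8 outputs.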
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
for (; channels >= 24; channels -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
__m128i vaccGHIJ = _mm_cvtepi16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_srai_epi32(_mm_unpackhi_epi16(vaccGHIJKLMN, vaccGHIJKLMN), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_load_si128((const __m128i*) (buffer + 8)));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_load_si128((const __m128i*) (buffer + 12)));
vaccGHIJ = _mm_add_epi32(vaccGHIJ, _mm_load_si128((const __m128i*) (buffer + 16)));
vaccKLMN = _mm_add_epi32(vaccKLMN, _mm_load_si128((const __m128i*) (buffer + 20)));
buffer += 24;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
__m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
__m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 24,931 | 49.469636 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-sse41-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__sse41_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
int32_t* b = buffer;
size_t c = channels;
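  // First pass: accumulate the first 7 rows into the int32 buffer, 8 channels per iteration, starting from the init bias.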
for (; c != 0; c = doz(c, 8)) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
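  // Intermediate passes: each pass adds another 7 rows to the buffered partial sums.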
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) b));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (b + 4)));
_mm_store_si128((__m128i*) b, vacc0123);
_mm_store_si128((__m128i*) (b + 4), vacc4567);
b += 8;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
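  // Final pass: accumulate the remaining rows, scale in fp32, clamp, and emit the int8 results.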
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
for (; channels >= 8; channels -= 8) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, _mm_load_si128((const __m128i*) buffer));
vacc4567 = _mm_add_epi32(vacc4567, _mm_load_si128((const __m128i*) (buffer + 4)));
buffer += 8;
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 11,320 | 39.870036 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-wasmsimd-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
int32_t* b = buffer;
size_t c = channels;
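  // First pass: sum the first 7 rows into the int32 buffer, 16 channels at a time, seeded with the init bias.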
for (; c != 0; c = doz(c, 16)) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
const v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
const v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
b += 16;
}
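  // Intermediate passes: add 7 more rows into the buffer per pass while more than 7 rows are left.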
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
v128_t vacc89AB = wasm_v128_load(b + 8);
v128_t vaccCDEF = wasm_v128_load(b + 12);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
b += 16;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
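  // Final pass: add the last rows, then requantize with the fp32 scale and magic-bias rounding before narrowing to int8.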
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 16; channels -= 16) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
v128_t vacc89AB = wasm_v128_load(buffer + 8);
v128_t vaccCDEF = wasm_v128_load(buffer + 12);
buffer += 16;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 14,705 | 40.897436 | 132 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-wasmsimd-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
int32_t* b = buffer;
size_t c = channels;
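  // First pass: sum the first 7 rows into the int32 buffer, 24 channels at a time (remainder handled 8 at a time), seeded with the init bias.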
for (; c >= 24; c -= 24) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
i0 += 24;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
i1 += 24;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
i2 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
i3 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
i4 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
i5 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
i6 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
const v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
const v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
const v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
const v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
wasm_v128_store(b + 16, vaccGHIJ);
wasm_v128_store(b + 20, vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
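  // Intermediate passes: accumulate 7 more rows into the buffer on each pass until at most 7 rows remain.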
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
i0 += 24;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
i1 += 24;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
i2 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
i3 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
i4 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
i5 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
i6 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
v128_t vacc89AB = wasm_v128_load(b + 8);
v128_t vaccCDEF = wasm_v128_load(b + 12);
v128_t vaccGHIJ = wasm_v128_load(b + 16);
v128_t vaccKLMN = wasm_v128_load(b + 20);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
wasm_v128_store(b + 8, vacc89AB);
wasm_v128_store(b + 12, vaccCDEF);
wasm_v128_store(b + 16, vaccGHIJ);
wasm_v128_store(b + 20, vaccKLMN);
b += 24;
}
if XNN_UNLIKELY(c != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
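  // Final pass: add the remaining rows and requantize to int8 using the fp32 scale and magic-bias rounding.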
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 24; channels -= 24) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
i0 += 24;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
i1 += 24;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
i2 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
i3 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
i4 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
i5 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
i6 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
v128_t vacc89AB = wasm_v128_load(buffer + 8);
v128_t vaccCDEF = wasm_v128_load(buffer + 12);
v128_t vaccGHIJ = wasm_v128_load(buffer + 16);
v128_t vaccKLMN = wasm_v128_load(buffer + 20);
buffer += 24;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
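// Requantize: convert the 32-bit sums to float, multiply by the scale, then use the magic-bias
// trick (float add, integer clamp, integer subtract) to round and shift into the output zero-point domain.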
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
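// Narrow to 16-bit, then to 8-bit, and clamp against the output maximum before storing 24 results.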
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
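// Store 8 outputs per iteration while at least 8 channels remain; the final 1-7 lanes are written piecewise.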
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 21,700 | 43.018256 | 132 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-fp32-wasmsimd-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7p7x__wasmsimd_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
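// After each group of 7 rows the row pointers have advanced by round_up_po2(channels, 8) bytes,
// so adding this increment moves them to the start of the next group of 7 rows.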
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
const v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
const v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
}
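// Intermediate passes: add 7 more rows into the 32-bit buffer per iteration until at most 7 rows remain.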
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(b);
v128_t vacc4567 = wasm_v128_load(b + 4);
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
wasm_v128_store(b, vacc0123);
wasm_v128_store(b + 4, vacc4567);
b += 8;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 8; channels -= 8) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
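// Requantize: float convert, scale, magic-bias round, clamp, and zero-point adjustment.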
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_v128_load(buffer);
v128_t vacc4567 = wasm_v128_load(buffer + 4);
buffer += 8;
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_extend_low_i16x8(vacc01234567));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 10,401 | 36.283154 | 132 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-rndnu-neon-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 16) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
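// Widen the 16-bit sums of 7 rows to 32 bits, add the init bias, and store them as the first-pass buffer.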
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 16)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
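// Requantize (rndnu): saturating left pre-shift, Q31 doubling multiply-high by the fixed-point
// multiplier, then a rounding post-shift back down to the output scale.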
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 13,661 | 42.788462 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-rndnu-neon-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 24; c -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
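// Same rndnu requantization as the 16-channel variant, applied to six 4-lane accumulators.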
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 19,843 | 44.829099 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-rndnu-neon-c32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
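// First pass: accumulate 7 rows of 32 channels at a time into the 32-bit buffer, 8 channels per tail iteration.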
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
const int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
const int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
const int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
const int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
const int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
const int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c >= 32; c -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(b + 8);
int32x4_t vaccCDEF = vld1q_s32(b + 12);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(b + 16);
int32x4_t vaccKLMN = vld1q_s32(b + 20);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(b + 24);
int32x4_t vaccSTUV = vld1q_s32(b + 28);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
vst1q_s32(b, vacc89AB); b += 4;
vst1q_s32(b, vaccCDEF); b += 4;
vst1q_s32(b, vaccGHIJ); b += 4;
vst1q_s32(b, vaccKLMN); b += 4;
vst1q_s32(b, vaccOPQR); b += 4;
vst1q_s32(b, vaccSTUV); b += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
c = doz(c, 8);
} while (c != 0);
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc89AB = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccCDEF = vld1q_s32(buffer); buffer += 4;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vaccGHIJ = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccKLMN = vld1q_s32(buffer); buffer += 4;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vaccOPQR = vld1q_s32(buffer); buffer += 4;
int32x4_t vaccSTUV = vld1q_s32(buffer); buffer += 4;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc89AB = vaddw_s16(vacc89AB, vget_low_s16(vsum89ABCDEF));
vaccCDEF = vaddw_s16(vaccCDEF, vget_high_s16(vsum89ABCDEF));
vaccGHIJ = vaddw_s16(vaccGHIJ, vget_low_s16(vsumGHIJKLMN));
vaccKLMN = vaddw_s16(vaccKLMN, vget_high_s16(vsumGHIJKLMN));
vaccOPQR = vaddw_s16(vaccOPQR, vget_low_s16(vsumOPQRSTUV));
vaccSTUV = vaddw_s16(vaccSTUV, vget_high_s16(vsumOPQRSTUV));
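    // rndnu requantization: saturating left pre-shift, doubling high-half multiply by the fixed-point multiplier, rounding post-shift.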
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vaccOPQR = vqshlq_s32(vaccOPQR, vleft_pre_shift);
vaccSTUV = vqshlq_s32(vaccSTUV, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vaccOPQR = vqdmulhq_s32(vaccOPQR, vmultiplier);
vaccSTUV = vqdmulhq_s32(vaccSTUV, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vleft_post_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 23,544
| 46.565657
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7p7x-minmax-rndnu-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/multipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7p7x__neon_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int32_t* buffer,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows > 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
const size_t input_increment = 7 * input_stride - round_up_po2(channels, 8) * sizeof(int8_t);
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
int32_t* b = buffer;
size_t c = channels;
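  // First pass: sum the first 7 rows into the 32-bit buffer, seeded with the init bias.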
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
const int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
const int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
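  // Middle passes: accumulate 7 more rows into the buffer per iteration until at most 7 rows remain.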
for (rows -= 7; rows > 7; rows -= 7) {
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
int32_t* b = buffer;
size_t c = channels;
for (; c != 0; c = doz(c, 8)) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(b);
int32x4_t vacc4567 = vld1q_s32(b + 4);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vst1q_s32(b, vacc0123); b += 4;
vst1q_s32(b, vacc4567); b += 4;
}
}
i0 = (const int8_t*) ((uintptr_t) i0 + input_increment);
i1 = (const int8_t*) ((uintptr_t) i1 + input_increment);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
i2 = (const int8_t*) ((uintptr_t) i2 + input_increment);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
i3 = (const int8_t*) ((uintptr_t) i3 + input_increment);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
i4 = (const int8_t*) ((uintptr_t) i4 + input_increment);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
i5 = (const int8_t*) ((uintptr_t) i5 + input_increment);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
i6 = (const int8_t*) ((uintptr_t) i6 + input_increment);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
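  // Final pass: add the remaining 1-7 rows (missing rows read from the zero buffer) and requantize with the rndnu parameters.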
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->rndnu_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->rndnu_neon.output_max);
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const int8x8_t vi0x01234567 = vld1_s8(i0);
const int8x8_t vi1x01234567 = vld1_s8(i1);
const int8x8_t vi2x01234567 = vld1_s8(i2);
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3);
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4);
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5);
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6);
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
int32x4_t vacc0123 = vld1q_s32(buffer); buffer += 4;
int32x4_t vacc4567 = vld1q_s32(buffer); buffer += 4;
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vacc0123 = vaddw_s16(vacc0123, vget_low_s16(vsum01234567));
vacc4567 = vaddw_s16(vacc4567, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 9,618
| 38.422131
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neon-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
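    // Magic-bias requantization: adding the bias leaves the rounded integer in the low mantissa bits; reinterpret as int32 and subtract (magic bias - output zero point).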
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
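        // Store the final 1-7 channels with 4-, 2- and 1-lane stores.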
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 8,348
| 40.745
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neon-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,208
| 43.386957
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neon-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
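    // Widen the 16-bit row sums to 32 bits and add the init bias.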
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vaccGHIJ = vreinterpretq_s32_f32(vaddq_f32(vfpaccGHIJ, vmagic_bias));
vaccKLMN = vreinterpretq_s32_f32(vaddq_f32(vfpaccKLMN, vmagic_bias));
vaccOPQR = vreinterpretq_s32_f32(vaddq_f32(vfpaccOPQR, vmagic_bias));
vaccSTUV = vreinterpretq_s32_f32(vaddq_f32(vfpaccSTUV, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = vqsubq_s32(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = vqsubq_s32(vaccKLMN, vmagic_bias_less_output_zero_point);
vaccOPQR = vqsubq_s32(vaccOPQR, vmagic_bias_less_output_zero_point);
vaccSTUV = vqsubq_s32(vaccSTUV, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 11,878
| 45.584314
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neon-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neon_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neon.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
#if XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
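      // Up to 7 trailing channels: full 8-byte loads are still issued; the kernel is declared XNN_OOB_READS, so reading past the end is allowed.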
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 6,401
| 36.881657
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neonv8-c16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->fp32_neonv8.init_bias);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
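    // On AArch64 the high-half variant of VQMOVN packs both int32 halves into
    // one int16x8_t directly; on AArch32 the two narrowed halves are combined
    // with an explicit vcombine_s16.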
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
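  // Remainder path: the last 1-15 channels are handled in 8-wide chunks. The
  // 8-byte loads may read past the valid channels (the kernel is annotated
  // XNN_OOB_READS), but only `channels` results are written via the 8/4/2/1
  // tail stores below.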
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 7,856
| 39.292308
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neonv8-c24.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
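  // Same pipeline as the c16 variant, but 24 channels per iteration; results
  // are written as one 16-byte store followed by one 8-byte store.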
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 9,573
| 41.741071
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neonv8-c32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
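  // Same pipeline, 32 channels per iteration: four int16 row-sum vectors,
  // eight int32/fp32 accumulators, and two 16-byte stores per loop.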
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
float32x4_t vfpaccGHIJ = vcvtq_f32_s32(vaccGHIJ);
float32x4_t vfpaccKLMN = vcvtq_f32_s32(vaccKLMN);
float32x4_t vfpaccOPQR = vcvtq_f32_s32(vaccOPQR);
float32x4_t vfpaccSTUV = vcvtq_f32_s32(vaccSTUV);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vfpaccGHIJ = vmulq_f32(vfpaccGHIJ, vscale);
vfpaccKLMN = vmulq_f32(vfpaccKLMN, vscale);
vfpaccOPQR = vmulq_f32(vfpaccOPQR, vscale);
vfpaccSTUV = vmulq_f32(vfpaccSTUV, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
vaccGHIJ = vcvtnq_s32_f32(vfpaccGHIJ);
vaccKLMN = vcvtnq_s32_f32(vfpaccKLMN);
vaccOPQR = vcvtnq_s32_f32(vfpaccOPQR);
vaccSTUV = vcvtnq_s32_f32(vfpaccSTUV);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 11,100
| 43.762097
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-neonv8-c8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/intrinsics-polyfill.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__neonv8_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neonv8.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
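  // 8 channels per iteration; the clamp bounds are loaded as 8-byte vectors
  // because at most one int8x8_t result is produced per loop.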
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#else // !XNN_ARCH_ARM64
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
#endif // !XNN_ARCH_ARM64
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 6,052
| 35.684848
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-fmagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
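  // "fmagic" rounding: the accumulator is scaled and clamped in fp32, then a
  // large magic bias (on the order of 2^23) is added so that the rounded
  // integer lands in the low mantissa bits of the float; reinterpreting those
  // bits as int32 and subtracting magic_bias_less_output_zero_point yields the
  // rounded result already offset by the output zero point.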
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 2,792
| 30.382022
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-fmagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
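  // 2-channel unrolled body below: loads for the next row are interleaved with
  // the accumulations of the previous one, presumably to expose more
  // instruction-level parallelism on in-order cores.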
for (; channels >= 2; channels -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (int8_t) vout;
}
}
| 4,521
| 29.554054
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-fmagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_fmagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_fmagic.init_bias;
const float vscale = params->fp32_scalar_fmagic.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
for (; channels >= 4; channels -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 6,199
| 31.631579
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-imagic-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
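  // "imagic" variant: the magic-bias trick is applied without pre-clamping in
  // fp32; the bit-reinterpreted value is instead clamped as an integer against
  // magic_min/magic_max before magic_bias_less_zero_point is subtracted.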
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 2,676
| 28.744444
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-imagic-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
for (; channels >= 2; channels -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output = (int8_t) vout;
}
}
| 4,332
| 27.506579
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-imagic-c4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_imagic_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
for (; channels >= 4; channels -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);
int32_t vout2 = (int32_t) float_as_uint32(vfpacc2);
int32_t vout3 = (int32_t) float_as_uint32(vfpacc3);
vout0 = math_max_s32(vout0, vmagic_min);
vout1 = math_max_s32(vout1, vmagic_min);
vout2 = math_max_s32(vout2, vmagic_min);
vout3 = math_max_s32(vout3, vmagic_min);
vout0 = math_min_s32(vout0, vmagic_max);
vout1 = math_min_s32(vout1, vmagic_max);
vout2 = math_min_s32(vout2, vmagic_max);
vout3 = math_min_s32(vout3, vmagic_max);
vout0 -= vmagic_bias_less_zero_point;
vout1 -= vmagic_bias_less_zero_point;
vout2 -= vmagic_bias_less_zero_point;
vout3 -= vmagic_bias_less_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 5,938
| 29.30102
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-lrintf-c1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c1(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
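  // "lrintf" variant: clamp in fp32, round with lrintf() (which follows the
  // current FP rounding mode, round-to-nearest-even by default), then add the
  // output zero point.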
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
| 2,696
| 29.303371
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-lrintf-c2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
for (; channels >= 2; channels -= 2) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
i0 += 2;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
i1 += 2;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
i2 += 2;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
i3 += 2;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
i4 += 2;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
i5 += 2;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
i6 += 2;
vacc0 += vi6x0;
vacc1 += vi6x1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
int32_t vout0 = vrndacc0 + voutput_zero_point;
int32_t vout1 = vrndacc1 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(channels != 0) {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0;
const int32_t vi1 = (int32_t) *i1;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output = (int8_t) vout;
}
}
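/*
 * A minimal usage sketch for the c2 kernel above, under stated assumptions: identity
 * quantization (input and output zero points of 0) and a plain mean over `rows`. Real
 * callers go through the XNNPACK operator API, which fills in the parameter block; the
 * field values below are illustrative only, and <xnnpack/gavgpool.h> is assumed to
 * declare both the kernel and the params union. For more than 7 rows XNNPACK uses
 * separate multipass kernels, not shown here.
 */
#include <xnnpack/gavgpool.h>
static void example_gavgpool_7x_scalar_lrintf_c2(void) {
  enum { ROWS = 5, CHANNELS = 6 };
  int8_t input[ROWS][CHANNELS] = {{0}}; // window to average: ROWS x CHANNELS int8 values
  int8_t zero[CHANNELS] = {0}; // substitute row used for the unused row pointers
  int8_t output[CHANNELS];
  union xnn_qs8_avgpool_minmax_params params;
  params.fp32_scalar_lrintf.init_bias = 0; // assumes input zero point of 0
  params.fp32_scalar_lrintf.scale = 1.0f / (float) ROWS; // real use also folds in quantization scales
  params.fp32_scalar_lrintf.output_min_less_zero_point = -128.0f; // assumes output zero point of 0
  params.fp32_scalar_lrintf.output_max_less_zero_point = 127.0f;
  params.fp32_scalar_lrintf.output_zero_point = 0;
  xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c2(
    ROWS, CHANNELS,
    &input[0][0], CHANNELS * sizeof(int8_t), // input_stride is in bytes
    zero, output, &params);
}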
| 4,397 | 28.716216 | 98 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-scalar-lrintf-c4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__scalar_lrintf_c4(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32_t vinit_bias = params->fp32_scalar_lrintf.init_bias;
const float vscale = params->fp32_scalar_lrintf.scale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
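  // Note: the main loop below processes 4 channels per iteration and interleaves the loads
  // from the next row with the additions for the previous one, exposing more independent
  // work for the compiler to schedule than a straightforward nested loop would.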
for (; channels >= 4; channels -= 4) {
const int32_t vi0x0 = (int32_t) i0[0];
const int32_t vi0x1 = (int32_t) i0[1];
const int32_t vi0x2 = (int32_t) i0[2];
const int32_t vi0x3 = (int32_t) i0[3];
i0 += 4;
int32_t vacc0 = vi0x0 + vinit_bias;
const int32_t vi1x0 = (int32_t) i1[0];
int32_t vacc1 = vi0x1 + vinit_bias;
const int32_t vi1x1 = (int32_t) i1[1];
int32_t vacc2 = vi0x2 + vinit_bias;
const int32_t vi1x2 = (int32_t) i1[2];
int32_t vacc3 = vi0x3 + vinit_bias;
const int32_t vi1x3 = (int32_t) i1[3];
i1 += 4;
vacc0 += vi1x0;
const int32_t vi2x0 = (int32_t) i2[0];
vacc1 += vi1x1;
const int32_t vi2x1 = (int32_t) i2[1];
vacc2 += vi1x2;
const int32_t vi2x2 = (int32_t) i2[2];
vacc3 += vi1x3;
const int32_t vi2x3 = (int32_t) i2[3];
i2 += 4;
vacc0 += vi2x0;
const int32_t vi3x0 = (int32_t) i3[0];
vacc1 += vi2x1;
const int32_t vi3x1 = (int32_t) i3[1];
vacc2 += vi2x2;
const int32_t vi3x2 = (int32_t) i3[2];
vacc3 += vi2x3;
const int32_t vi3x3 = (int32_t) i3[3];
i3 += 4;
vacc0 += vi3x0;
const int32_t vi4x0 = (int32_t) i4[0];
vacc1 += vi3x1;
const int32_t vi4x1 = (int32_t) i4[1];
vacc2 += vi3x2;
const int32_t vi4x2 = (int32_t) i4[2];
vacc3 += vi3x3;
const int32_t vi4x3 = (int32_t) i4[3];
i4 += 4;
vacc0 += vi4x0;
const int32_t vi5x0 = (int32_t) i5[0];
vacc1 += vi4x1;
const int32_t vi5x1 = (int32_t) i5[1];
vacc2 += vi4x2;
const int32_t vi5x2 = (int32_t) i5[2];
vacc3 += vi4x3;
const int32_t vi5x3 = (int32_t) i5[3];
i5 += 4;
vacc0 += vi5x0;
const int32_t vi6x0 = (int32_t) i6[0];
vacc1 += vi5x1;
const int32_t vi6x1 = (int32_t) i6[1];
vacc2 += vi5x2;
const int32_t vi6x2 = (int32_t) i6[2];
vacc3 += vi5x3;
const int32_t vi6x3 = (int32_t) i6[3];
i6 += 4;
vacc0 += vi6x0;
vacc1 += vi6x1;
vacc2 += vi6x2;
vacc3 += vi6x3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0);
const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1);
const int32_t vrndacc2 = (int32_t) lrintf(vfpacc2);
const int32_t vrndacc3 = (int32_t) lrintf(vfpacc3);
int32_t vout0 = vrndacc0 + voutput_zero_point;
int32_t vout1 = vrndacc1 + voutput_zero_point;
int32_t vout2 = vrndacc2 + voutput_zero_point;
int32_t vout3 = vrndacc3 + voutput_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(channels != 0) {
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) *i0++;
const int32_t vi1 = (int32_t) *i1++;
vacc += vi0;
const int32_t vi2 = (int32_t) *i2++;
vacc += vi1;
const int32_t vi3 = (int32_t) *i3++;
vacc += vi2;
const int32_t vi4 = (int32_t) *i4++;
vacc += vi3;
const int32_t vi5 = (int32_t) *i5++;
vacc += vi4;
const int32_t vi6 = (int32_t) *i6++;
vacc += vi5;
vacc += vi6;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
const int32_t vrndacc = (int32_t) lrintf(vfpacc);
int32_t vout = vrndacc + voutput_zero_point;
*output++ = (int8_t) vout;
} while (--channels != 0);
}
}
| 6,047 | 30.831579 | 98 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse2-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
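  // Notes on the SSE2 variant below:
  //  - SSE2 lacks _mm_cvtepi8_epi16, so int8 lanes are sign-extended to int16 by duplicating
  //    each byte with _mm_unpacklo_epi8(v, v) and arithmetic-shifting right by 8.
  //  - The sum of 7 int8 rows fits in int16 (|sum| <= 7 * 128 = 896), so the accumulator is
  //    widened to int32 (via the _mm_cmpgt_epi16 sign mask and unpacks) only when the bias
  //    is added.
  //  - Requantization converts to float, scales, clamps the upper bound, then uses
  //    _mm_cvtps_epi32 (round-to-nearest-even under the default MXCSR rounding mode) and
  //    saturating packs; the lower bound is enforced with _mm_max_epi16 before the final
  //    pack to int8.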
for (; channels >= 16; channels -= 16) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
i0 += 16;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
i1 += 16;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
i2 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
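  // Note: the remainder path below still loads full 8-byte groups from every row (the
  // kernel is declared XNN_OOB_READS, so reading past `channels` is expected) and then
  // stores only the surviving 8/4/2/1 output bytes.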
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (int8_t) vout0123;
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 10,990 | 42.442688 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse2-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
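  // Note: same scheme as the c16 variant, but 24 channels per iteration split into three
  // 8-lane groups (the 01234567, 89ABCDEF and GHIJKLMN suffixes in the variable names);
  // the remainder path below works 8 channels at a time, as in the narrower variants.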
for (; channels >= 24; channels -= 24) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi0x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
const __m128i vi0xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i0 + 16));
i0 += 24;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vxi0x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x89ABCDEF, vi0x89ABCDEF), 8);
const __m128i vi1x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
const __m128i vxi0xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi0xGHIJKLMN, vi0xGHIJKLMN), 8);
const __m128i vi1xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i1 + 16));
i1 += 24;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vxi1x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x89ABCDEF, vi1x89ABCDEF), 8);
const __m128i vi2x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
const __m128i vxi1xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi1xGHIJKLMN, vi1xGHIJKLMN), 8);
const __m128i vi2xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i2 + 16));
i2 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x89ABCDEF, vi2x89ABCDEF), 8);
const __m128i vi3x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi2xGHIJKLMN, vi2xGHIJKLMN), 8);
const __m128i vi3xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i3 + 16));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x89ABCDEF, vi3x89ABCDEF), 8);
const __m128i vi4x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i4 + 8));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi3xGHIJKLMN, vi3xGHIJKLMN), 8);
const __m128i vi4xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i4 + 16));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x89ABCDEF, vi4x89ABCDEF), 8);
const __m128i vi5x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i5 + 8));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi4xGHIJKLMN, vi4xGHIJKLMN), 8);
const __m128i vi5xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i5 + 16));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x89ABCDEF, vi5x89ABCDEF), 8);
const __m128i vi6x89ABCDEF = _mm_loadl_epi64((const __m128i*) (i6 + 8));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi5xGHIJKLMN, vi5xGHIJKLMN), 8);
const __m128i vi6xGHIJKLMN = _mm_loadl_epi64((const __m128i*) (i6 + 16));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x89ABCDEF, vi6x89ABCDEF), 8);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_srai_epi16(_mm_unpacklo_epi8(vi6xGHIJKLMN, vi6xGHIJKLMN), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
const __m128i vsgnacc89ABCDEF = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc89ABCDEF);
__m128i vacc89AB = _mm_unpacklo_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
__m128i vaccCDEF = _mm_unpackhi_epi16(vacc89ABCDEF, vsgnacc89ABCDEF);
const __m128i vsgnaccGHIJKLMN = _mm_cmpgt_epi16(_mm_setzero_si128(), vaccGHIJKLMN);
__m128i vaccGHIJ = _mm_unpacklo_epi16(vaccGHIJKLMN, vsgnaccGHIJKLMN);
__m128i vaccKLMN = _mm_unpackhi_epi16(vaccGHIJKLMN, vsgnaccGHIJKLMN);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
__m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
__m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
voutGHIJKLMN = _mm_max_epi16(voutGHIJKLMN, voutput_min);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (int8_t) vout0123;
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 13,702 | 46.251724 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse2-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse2_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
for (; channels >= 8; channels -= 8) {
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
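  // Note: in the c8 variant at most 7 channels can remain here, so the tail is a single
  // 8-lane pass (a plain block instead of the do/while used by the wider variants) that
  // stores only the 4/2/1 bytes actually needed.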
if XNN_UNLIKELY(channels != 0) {
{
const __m128i vi0x01234567 = _mm_loadl_epi64((const __m128i*) i0);
i0 += 8;
const __m128i vi1x01234567 = _mm_loadl_epi64((const __m128i*) i1);
i1 += 8;
const __m128i vxi0x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi0x01234567, vi0x01234567), 8);
const __m128i vi2x01234567 = _mm_loadl_epi64((const __m128i*) i2);
i2 += 8;
const __m128i vxi1x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi1x01234567, vi1x01234567), 8);
const __m128i vi3x01234567 = _mm_loadl_epi64((const __m128i*) i3);
i3 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi2x01234567, vi2x01234567), 8);
const __m128i vi4x01234567 = _mm_loadl_epi64((const __m128i*) i4);
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi3x01234567, vi3x01234567), 8);
const __m128i vi5x01234567 = _mm_loadl_epi64((const __m128i*) i5);
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi4x01234567, vi4x01234567), 8);
const __m128i vi6x01234567 = _mm_loadl_epi64((const __m128i*) i6);
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi5x01234567, vi5x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vi6x01234567, vi6x01234567), 8);
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
const __m128i vsgnacc01234567 = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc01234567);
__m128i vacc0123 = _mm_unpacklo_epi16(vacc01234567, vsgnacc01234567);
__m128i vacc4567 = _mm_unpackhi_epi16(vacc01234567, vsgnacc01234567);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
uint32_t vout0123 = (uint32_t) _mm_cvtsi128_si32(vout0123456701234567);
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) vout0123);
vout0123 >>= 16;
output += 2;
}
if (channels & 1) {
*output = (int8_t) vout0123;
}
}
}
}
| 8,154 | 37.833333 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse41-c16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
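  // Note: the SSE4.1 variant differs from the SSE2 one mainly in widening the int8 loads
  // with _mm_cvtepi8_epi16 (and the low accumulator halves with _mm_cvtepi16_epi32), and
  // in applying the output minimum with _mm_max_epi8 after packing all the way to int8.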
for (; channels >= 16; channels -= 16) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
i0 += 16;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
i1 += 16;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
i2 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
i3 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
i4 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
i5 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
i6 += 16;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
| 9,021 | 41.356808 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse41-c24.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
for (; channels >= 24; channels -= 24) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
const __m128i vxi0x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 8)));
const __m128i vxi0xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i0 + 16)));
i0 += 24;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
const __m128i vxi1x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 8)));
const __m128i vxi1xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i1 + 16)));
i1 += 24;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
__m128i vacc89ABCDEF = _mm_add_epi16(vxi0x89ABCDEF, vxi1x89ABCDEF);
const __m128i vxi2x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 8)));
__m128i vaccGHIJKLMN = _mm_add_epi16(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const __m128i vxi2xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i2 + 16)));
i2 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi2x89ABCDEF);
const __m128i vxi3x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi2xGHIJKLMN);
const __m128i vxi3xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i3 + 16)));
i3 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi3x89ABCDEF);
const __m128i vxi4x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi3xGHIJKLMN);
const __m128i vxi4xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i4 + 16)));
i4 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi4x89ABCDEF);
const __m128i vxi5x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi4xGHIJKLMN);
const __m128i vxi5xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i5 + 16)));
i5 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi5x89ABCDEF);
const __m128i vxi6x89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 8)));
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi5xGHIJKLMN);
const __m128i vxi6xGHIJKLMN = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i6 + 16)));
i6 += 24;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
vacc89ABCDEF = _mm_add_epi16(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = _mm_add_epi16(vaccGHIJKLMN, vxi6xGHIJKLMN);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
__m128i vacc89AB = _mm_cvtepi16_epi32(vacc89ABCDEF);
__m128i vaccCDEF = _mm_srai_epi32(_mm_unpackhi_epi16(vacc89ABCDEF, vacc89ABCDEF), 16);
__m128i vaccGHIJ = _mm_cvtepi16_epi32(vaccGHIJKLMN);
__m128i vaccKLMN = _mm_srai_epi32(_mm_unpackhi_epi16(vaccGHIJKLMN, vaccGHIJKLMN), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
vacc89AB = _mm_add_epi32(vacc89AB, vinit_bias);
vaccCDEF = _mm_add_epi32(vaccCDEF, vinit_bias);
vaccGHIJ = _mm_add_epi32(vaccGHIJ, vinit_bias);
vaccKLMN = _mm_add_epi32(vaccKLMN, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vacc89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vaccCDEF);
__m128 vfpaccGHIJ = _mm_cvtepi32_ps(vaccGHIJ);
__m128 vfpaccKLMN = _mm_cvtepi32_ps(vaccKLMN);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
vfpaccGHIJ = _mm_mul_ps(vfpaccGHIJ, vscale);
vfpaccKLMN = _mm_mul_ps(vfpaccKLMN, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vfpacc89AB = _mm_min_ps(vfpacc89AB, voutput_max_less_zero_point);
vfpaccCDEF = _mm_min_ps(vfpaccCDEF, voutput_max_less_zero_point);
vfpaccGHIJ = _mm_min_ps(vfpaccGHIJ, voutput_max_less_zero_point);
vfpaccKLMN = _mm_min_ps(vfpaccKLMN, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
vaccGHIJ = _mm_cvtps_epi32(vfpaccGHIJ);
vaccKLMN = _mm_cvtps_epi32(vfpaccKLMN);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i voutGHIJKLMN = _mm_adds_epi16(_mm_packs_epi32(vaccGHIJ, vaccKLMN), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
__m128i voutGHIJKLMNGHIJKLMN = _mm_packs_epi16(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNGHIJKLMN = _mm_max_epi8(voutGHIJKLMNGHIJKLMN, voutput_min);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
_mm_storel_epi64((__m128i*) (output + 16), voutGHIJKLMNGHIJKLMN);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if XNN_LIKELY(channels >= 8) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
file_length: 11,100 | avg_line_length: 44.871901 | max_line_length: 106 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-sse41-c8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-sse4.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/gavgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__sse41_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
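  // Seven row pointers are derived from the input stride; any pointer past the last valid row
  // is redirected to the zero buffer, so the extra rows contribute nothing to the sums.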
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse4.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
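  // Main loop: accumulate 7 rows of 8 int8 values in 16-bit lanes (7 * 127 fits in int16),
  // widen to 32-bit, add the bias, then requantize via float: scale, clamp from above,
  // round to int32, add the output zero point with saturation, pack to int8, clamp from below.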
for (; channels >= 8; channels -= 8) {
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const __m128i vxi0x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i0));
i0 += 8;
const __m128i vxi1x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i1));
i1 += 8;
__m128i vacc01234567 = _mm_add_epi16(vxi0x01234567, vxi1x01234567);
const __m128i vxi2x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i2));
i2 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi2x01234567);
const __m128i vxi3x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i3));
i3 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi3x01234567);
const __m128i vxi4x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i4));
i4 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi4x01234567);
const __m128i vxi5x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i5));
i5 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi5x01234567);
const __m128i vxi6x01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i6));
i6 += 8;
vacc01234567 = _mm_add_epi16(vacc01234567, vxi6x01234567);
__m128i vacc0123 = _mm_cvtepi16_epi32(vacc01234567);
__m128i vacc4567 = _mm_srai_epi32(_mm_unpackhi_epi16(vacc01234567, vacc01234567), 16);
vacc0123 = _mm_add_epi32(vacc0123, vinit_bias);
vacc4567 = _mm_add_epi32(vacc4567, vinit_bias);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vacc0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vacc4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc0123 = _mm_min_ps(vfpacc0123, voutput_max_less_zero_point);
vfpacc4567 = _mm_min_ps(vfpacc4567, voutput_max_less_zero_point);
vacc0123 = _mm_cvtps_epi32(vfpacc0123);
vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
if (channels & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
file_length: 6,896 | avg_line_length: 37.530726 | max_line_length: 106 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-wasmsimd-c16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
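  // Requantization uses the "magic bias" trick: after scaling, adding vmagic_bias places the
  // rounded integer in the low bits of the float; the integer max against vmagic_min applies the
  // lower output clamp on that biased representation, and subtracting
  // vmagic_bias_less_output_zero_point recovers the zero-point-adjusted value. The upper clamp
  // is applied after narrowing, via voutput_max.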
for (; channels >= 16; channels -= 16) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
i0 += 16;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
i1 += 16;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
i2 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
i3 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
i4 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
i5 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
i6 += 16;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
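      // A full group of 8 channels is written with one 64-bit lane store; the final partial
      // group falls through to 32-, 16-, and 8-bit lane stores.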
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
file_length: 8,474 | avg_line_length: 38.976415 | max_line_length: 132 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-wasmsimd-c24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 24; channels -= 24) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
i0 += 24;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
i1 += 24;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
i2 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
i3 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
i4 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
i5 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
i6 += 24;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNGHIJKLMN = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutGHIJKLMN);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNGHIJKLMN = wasm_i8x16_min(voutGHIJKLMNGHIJKLMN, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store64_lane(output + 16, voutGHIJKLMNGHIJKLMN, 0);
output += 24;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
file_length: 10,357 | avg_line_length: 41.979253 | max_line_length: 132 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-wasmsimd-c32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 32; channels -= 32) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
const v128_t vxi0x89ABCDEF = wasm_i16x8_load8x8(i0 + 8);
const v128_t vxi0xGHIJKLMN = wasm_i16x8_load8x8(i0 + 16);
const v128_t vxi0xOPQRSTUV = wasm_i16x8_load8x8(i0 + 24);
i0 += 32;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
const v128_t vxi1x89ABCDEF = wasm_i16x8_load8x8(i1 + 8);
const v128_t vxi1xGHIJKLMN = wasm_i16x8_load8x8(i1 + 16);
const v128_t vxi1xOPQRSTUV = wasm_i16x8_load8x8(i1 + 24);
i1 += 32;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
v128_t vacc89ABCDEF = wasm_i16x8_add(vxi0x89ABCDEF, vxi1x89ABCDEF);
const v128_t vxi2x89ABCDEF = wasm_i16x8_load8x8(i2 + 8);
v128_t vaccGHIJKLMN = wasm_i16x8_add(vxi0xGHIJKLMN, vxi1xGHIJKLMN);
const v128_t vxi2xGHIJKLMN = wasm_i16x8_load8x8(i2 + 16);
v128_t vaccOPQRSTUV = wasm_i16x8_add(vxi0xOPQRSTUV, vxi1xOPQRSTUV);
const v128_t vxi2xOPQRSTUV = wasm_i16x8_load8x8(i2 + 24);
i2 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi2x89ABCDEF);
const v128_t vxi3x89ABCDEF = wasm_i16x8_load8x8(i3 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi2xGHIJKLMN);
const v128_t vxi3xGHIJKLMN = wasm_i16x8_load8x8(i3 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi2xOPQRSTUV);
const v128_t vxi3xOPQRSTUV = wasm_i16x8_load8x8(i3 + 24);
i3 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi3x89ABCDEF);
const v128_t vxi4x89ABCDEF = wasm_i16x8_load8x8(i4 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi3xGHIJKLMN);
const v128_t vxi4xGHIJKLMN = wasm_i16x8_load8x8(i4 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi3xOPQRSTUV);
const v128_t vxi4xOPQRSTUV = wasm_i16x8_load8x8(i4 + 24);
i4 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi4x89ABCDEF);
const v128_t vxi5x89ABCDEF = wasm_i16x8_load8x8(i5 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi4xGHIJKLMN);
const v128_t vxi5xGHIJKLMN = wasm_i16x8_load8x8(i5 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi4xOPQRSTUV);
const v128_t vxi5xOPQRSTUV = wasm_i16x8_load8x8(i5 + 24);
i5 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi5x89ABCDEF);
const v128_t vxi6x89ABCDEF = wasm_i16x8_load8x8(i6 + 8);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi5xGHIJKLMN);
const v128_t vxi6xGHIJKLMN = wasm_i16x8_load8x8(i6 + 16);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi5xOPQRSTUV);
const v128_t vxi6xOPQRSTUV = wasm_i16x8_load8x8(i6 + 24);
i6 += 32;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
vacc89ABCDEF = wasm_i16x8_add(vacc89ABCDEF, vxi6x89ABCDEF);
vaccGHIJKLMN = wasm_i16x8_add(vaccGHIJKLMN, vxi6xGHIJKLMN);
vaccOPQRSTUV = wasm_i16x8_add(vaccOPQRSTUV, vxi6xOPQRSTUV);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
v128_t vacc89AB = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc89ABCDEF));
v128_t vaccCDEF = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc89ABCDEF));
v128_t vaccGHIJ = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccGHIJKLMN));
v128_t vaccKLMN = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccGHIJKLMN));
v128_t vaccOPQR = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vaccOPQRSTUV));
v128_t vaccSTUV = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vaccOPQRSTUV));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vaccGHIJ = wasm_f32x4_convert_i32x4(vaccGHIJ);
vaccKLMN = wasm_f32x4_convert_i32x4(vaccKLMN);
vaccOPQR = wasm_f32x4_convert_i32x4(vaccOPQR);
vaccSTUV = wasm_f32x4_convert_i32x4(vaccSTUV);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vaccGHIJ = wasm_f32x4_mul(vaccGHIJ, vscale);
vaccKLMN = wasm_f32x4_mul(vaccKLMN, vscale);
vaccOPQR = wasm_f32x4_mul(vaccOPQR, vscale);
vaccSTUV = wasm_f32x4_mul(vaccSTUV, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vaccGHIJ = wasm_f32x4_add(vaccGHIJ, vmagic_bias);
vaccKLMN = wasm_f32x4_add(vaccKLMN, vmagic_bias);
vaccOPQR = wasm_f32x4_add(vaccOPQR, vmagic_bias);
vaccSTUV = wasm_f32x4_add(vaccSTUV, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vaccGHIJ = wasm_i32x4_max(vaccGHIJ, vmagic_min);
vaccKLMN = wasm_i32x4_max(vaccKLMN, vmagic_min);
vaccOPQR = wasm_i32x4_max(vaccOPQR, vmagic_min);
vaccSTUV = wasm_i32x4_max(vaccSTUV, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
vaccGHIJ = wasm_i32x4_sub(vaccGHIJ, vmagic_bias_less_output_zero_point);
vaccKLMN = wasm_i32x4_sub(vaccKLMN, vmagic_bias_less_output_zero_point);
vaccOPQR = wasm_i32x4_sub(vaccOPQR, vmagic_bias_less_output_zero_point);
vaccSTUV = wasm_i32x4_sub(vaccSTUV, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t voutGHIJKLMN = wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN);
v128_t voutOPQRSTUV = wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_i8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_i8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(channels != 0) {
do {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(channels >= 8) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
channels -= 8;
} else {
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
file_length: 11,999 | avg_line_length: 43.94382 | max_line_length: 132 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-fp32-wasmsimd-c8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_fp32_ukernel_7x__wasmsimd_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const v128_t vinit_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.init_bias);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; channels >= 8; channels -= 8) {
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const v128_t vxi0x01234567 = wasm_i16x8_load8x8(i0);
i0 += 8;
const v128_t vxi1x01234567 = wasm_i16x8_load8x8(i1);
i1 += 8;
v128_t vacc01234567 = wasm_i16x8_add(vxi0x01234567, vxi1x01234567);
const v128_t vxi2x01234567 = wasm_i16x8_load8x8(i2);
i2 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi2x01234567);
const v128_t vxi3x01234567 = wasm_i16x8_load8x8(i3);
i3 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi3x01234567);
const v128_t vxi4x01234567 = wasm_i16x8_load8x8(i4);
i4 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi4x01234567);
const v128_t vxi5x01234567 = wasm_i16x8_load8x8(i5);
i5 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi5x01234567);
const v128_t vxi6x01234567 = wasm_i16x8_load8x8(i6);
i6 += 8;
vacc01234567 = wasm_i16x8_add(vacc01234567, vxi6x01234567);
v128_t vacc0123 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_low_i16x8(vacc01234567));
v128_t vacc4567 = wasm_i32x4_add(vinit_bias, wasm_i32x4_extend_high_i16x8(vacc01234567));
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
const v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (channels & 4) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (channels & 2) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (channels & 1) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
file_length: 6,562 | avg_line_length: 35.870787 | max_line_length: 132 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-rndnu-neon-c16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c16(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
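  // rndnu requantization: a saturating left pre-shift (vqshlq_s32), a saturating doubling
  // multiply returning the high half (vqdmulhq_s32) by the fixed-point multiplier, then a
  // rounding post-shift (vrshlq_s32; negative shift amounts shift right with rounding).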
for (; channels >= 16; channels -= 16) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
file_length: 8,089 | avg_line_length: 40.27551 | max_line_length: 103 | extension_type: c

repo: XNNPACK | file: XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-rndnu-neon-c24.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c24(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
  const int32x4_t vinit_bias = vld1q_dup_s32(&params->rndnu_neon.init_bias);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
for (; channels >= 24; channels -= 24) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x8_t voutGHIJKLMN = vqmovn_s16(vaccGHIJKLMN);
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMN = vmax_s8(voutGHIJKLMN, vget_low_s8(voutput_min));
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMN = vmin_s8(voutGHIJKLMN, vget_low_s8(voutput_max));
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1_s8(output, voutGHIJKLMN); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
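// --- Editor's sketch (not part of the generated kernel above) ---
// A scalar model of the rndnu requantization that the vqshlq_s32 /
// vqdmulhq_s32 / vrshlq_s32 sequence above performs on every lane.  The
// helper name is made up for illustration; saturation of the left shifts and
// of the doubling multiply is omitted for brevity.
static inline int32_t rndnu_requantize_lane_sketch(
    int32_t acc, int32_t left_pre_shift, int32_t multiplier, int32_t left_post_shift)
{
  // vqshlq_s32: shift left by a signed amount; negative amounts shift right.
  const int32_t pre = left_pre_shift >= 0
      ? (int32_t) ((uint32_t) acc << left_pre_shift)
      : acc >> -left_pre_shift;
  // vqdmulhq_s32: high 32 bits of the doubled 64-bit product 2 * pre * multiplier.
  const int32_t prod = (int32_t) (((int64_t) pre * (int64_t) multiplier * 2) >> 32);
  // vrshlq_s32: negative amounts do a rounding right shift
  // (add 1 << (n - 1), then arithmetic shift right by n).
  if (left_post_shift >= 0) {
    return (int32_t) ((uint32_t) prod << left_post_shift);
  }
  const uint32_t n = (uint32_t) -left_post_shift;
  return (int32_t) (((int64_t) prod + ((int64_t) 1 << (n - 1))) >> n);
}
// The requantized lanes are then narrowed to int16, offset by the output zero
// point with saturating adds, narrowed to int8 and clamped to
// [output_min, output_max], exactly as the intrinsics above do.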
| 9,838 | 42.728889 | 103 | c |
XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-rndnu-neon-c32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c32(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
for (; channels >= 32; channels -= 32) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi0x89ABCDEF = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xGHIJKLMN = vld1_s8(i0); i0 += 8;
const int8x8_t vi0xOPQRSTUV = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi1x89ABCDEF = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xGHIJKLMN = vld1_s8(i1); i1 += 8;
const int8x8_t vi1xOPQRSTUV = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi2x89ABCDEF = vld1_s8(i2); i2 += 8;
int16x8_t vsum89ABCDEF = vaddl_s8(vi0x89ABCDEF, vi1x89ABCDEF);
const int8x8_t vi2xGHIJKLMN = vld1_s8(i2); i2 += 8;
int16x8_t vsumGHIJKLMN = vaddl_s8(vi0xGHIJKLMN, vi1xGHIJKLMN);
const int8x8_t vi2xOPQRSTUV = vld1_s8(i2); i2 += 8;
int16x8_t vsumOPQRSTUV = vaddl_s8(vi0xOPQRSTUV, vi1xOPQRSTUV);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi3x89ABCDEF = vld1_s8(i3); i3 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi2x89ABCDEF);
const int8x8_t vi3xGHIJKLMN = vld1_s8(i3); i3 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi2xGHIJKLMN);
const int8x8_t vi3xOPQRSTUV = vld1_s8(i3); i3 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi2xOPQRSTUV);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi4x89ABCDEF = vld1_s8(i4); i4 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi3x89ABCDEF);
const int8x8_t vi4xGHIJKLMN = vld1_s8(i4); i4 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi3xGHIJKLMN);
const int8x8_t vi4xOPQRSTUV = vld1_s8(i4); i4 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi3xOPQRSTUV);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi5x89ABCDEF = vld1_s8(i5); i5 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi4x89ABCDEF);
const int8x8_t vi5xGHIJKLMN = vld1_s8(i5); i5 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi4xGHIJKLMN);
const int8x8_t vi5xOPQRSTUV = vld1_s8(i5); i5 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi4xOPQRSTUV);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
const int8x8_t vi6x89ABCDEF = vld1_s8(i6); i6 += 8;
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi5x89ABCDEF);
const int8x8_t vi6xGHIJKLMN = vld1_s8(i6); i6 += 8;
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi5xGHIJKLMN);
const int8x8_t vi6xOPQRSTUV = vld1_s8(i6); i6 += 8;
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi5xOPQRSTUV);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
vsum89ABCDEF = vaddw_s8(vsum89ABCDEF, vi6x89ABCDEF);
vsumGHIJKLMN = vaddw_s8(vsumGHIJKLMN, vi6xGHIJKLMN);
vsumOPQRSTUV = vaddw_s8(vsumOPQRSTUV, vi6xOPQRSTUV);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
int32x4_t vacc89AB = vaddw_s16(vinit_bias, vget_low_s16(vsum89ABCDEF));
int32x4_t vaccCDEF = vaddw_s16(vinit_bias, vget_high_s16(vsum89ABCDEF));
int32x4_t vaccGHIJ = vaddw_s16(vinit_bias, vget_low_s16(vsumGHIJKLMN));
int32x4_t vaccKLMN = vaddw_s16(vinit_bias, vget_high_s16(vsumGHIJKLMN));
int32x4_t vaccOPQR = vaddw_s16(vinit_bias, vget_low_s16(vsumOPQRSTUV));
int32x4_t vaccSTUV = vaddw_s16(vinit_bias, vget_high_s16(vsumOPQRSTUV));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vaccGHIJ = vqshlq_s32(vaccGHIJ, vleft_pre_shift);
vaccKLMN = vqshlq_s32(vaccKLMN, vleft_pre_shift);
vaccOPQR = vqshlq_s32(vaccOPQR, vleft_pre_shift);
vaccSTUV = vqshlq_s32(vaccSTUV, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vaccGHIJ = vqdmulhq_s32(vaccGHIJ, vmultiplier);
vaccKLMN = vqdmulhq_s32(vaccKLMN, vmultiplier);
vaccOPQR = vqdmulhq_s32(vaccOPQR, vmultiplier);
vaccSTUV = vqdmulhq_s32(vaccSTUV, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vleft_post_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vleft_post_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vleft_post_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
int16x8_t vaccGHIJKLMN = vqmovn_high_s32(vqmovn_s32(vaccGHIJ), vaccKLMN);
int16x8_t vaccOPQRSTUV = vqmovn_high_s32(vqmovn_s32(vaccOPQR), vaccSTUV);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
int16x8_t vaccGHIJKLMN = vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN));
int16x8_t vaccOPQRSTUV = vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
vaccGHIJKLMN = vqaddq_s16(vaccGHIJKLMN, voutput_zero_point);
vaccOPQRSTUV = vqaddq_s16(vaccOPQRSTUV, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
int8x16_t voutGHIJKLMNOPQRSTUV = vqmovn_high_s16(vqmovn_s16(vaccGHIJKLMN), vaccOPQRSTUV);
#else // !XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
int8x16_t voutGHIJKLMNOPQRSTUV = vcombine_s8(vqmovn_s16(vaccGHIJKLMN), vqmovn_s16(vaccOPQRSTUV));
#endif // !XNN_ARCH_ARM64
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_s8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_s8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
vst1q_s8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(channels != 0) {
do {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(channels >= 8) {
vst1_s8(output, vout01234567); output += 8;
channels -= 8;
} else {
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0); output += 1;
}
channels = 0;
}
} while (channels != 0);
}
}
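// --- Editor's sketch (not part of the generated kernel above) ---
// A plain-C reference for the pointer setup and 7-row accumulation used by
// this unipass kernel: row pointers at or past `rows` are redirected to the
// caller-provided `zero` buffer, so the inner loop can always sum exactly
// seven rows without branching.  The function name and the int32 accumulator
// output are editorial; requantization to int8 happens afterwards, as above.
static void gavgpool_7x_accumulate_sketch(
    size_t rows,               // 1..7, as asserted by the kernel
    size_t channels,
    const int8_t* input,
    size_t input_stride,       // in bytes
    const int8_t* zero,        // at least `channels` bytes of zeros
    int32_t init_bias,
    int32_t* acc)              // out: `channels` accumulators
{
  const int8_t* i[7];
  i[0] = input;
  for (size_t r = 1; r < 7; r++) {
    i[r] = (r < rows) ? i[r - 1] + input_stride : zero;
  }
  for (size_t c = 0; c < channels; c++) {
    int32_t sum = init_bias;
    for (size_t r = 0; r < 7; r++) {
      sum += (int32_t) i[r][c];
    }
    acc[c] = sum;
  }
}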
| 11,397 | 44.7751 | 103 | c |
XNNPACK | XNNPACK-master/src/qs8-gavgpool/gen/qs8-gavgpool-7x-minmax-rndnu-neon-c8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gavgpool/unipass-neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gavgpool.h>
void xnn_qs8_gavgpool_minmax_rndnu_ukernel_7x__neon_c8(
size_t rows,
size_t channels,
const int8_t* input,
size_t input_stride,
const int8_t* zero,
int8_t* output,
const union xnn_qs8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(rows != 0);
assert(rows <= 7);
assert(channels != 0);
const int8_t* i0 = input;
const int8_t* i1 = (const int8_t*) ((uintptr_t) i0 + input_stride);
if XNN_UNPREDICTABLE(rows < 2) {
i1 = zero;
}
const int8_t* i2 = (const int8_t*) ((uintptr_t) i1 + input_stride);
if XNN_UNPREDICTABLE(rows <= 2) {
i2 = zero;
}
const int8_t* i3 = (const int8_t*) ((uintptr_t) i2 + input_stride);
if XNN_UNPREDICTABLE(rows < 4) {
i3 = zero;
}
const int8_t* i4 = (const int8_t*) ((uintptr_t) i3 + input_stride);
if XNN_UNPREDICTABLE(rows <= 4) {
i4 = zero;
}
const int8_t* i5 = (const int8_t*) ((uintptr_t) i4 + input_stride);
if XNN_UNPREDICTABLE(rows < 6) {
i5 = zero;
}
const int8_t* i6 = (const int8_t*) ((uintptr_t) i5 + input_stride);
if XNN_UNPREDICTABLE(rows <= 6) {
i6 = zero;
}
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->rndnu_neon.init_bias);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
for (; channels >= 8; channels -= 8) {
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else // !XNN_ARCH_ARM64
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif // !XNN_ARCH_ARM64
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(channels != 0) {
{
const int8x8_t vi0x01234567 = vld1_s8(i0); i0 += 8;
const int8x8_t vi1x01234567 = vld1_s8(i1); i1 += 8;
const int8x8_t vi2x01234567 = vld1_s8(i2); i2 += 8;
int16x8_t vsum01234567 = vaddl_s8(vi0x01234567, vi1x01234567);
const int8x8_t vi3x01234567 = vld1_s8(i3); i3 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi2x01234567);
const int8x8_t vi4x01234567 = vld1_s8(i4); i4 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi3x01234567);
const int8x8_t vi5x01234567 = vld1_s8(i5); i5 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi4x01234567);
const int8x8_t vi6x01234567 = vld1_s8(i6); i6 += 8;
vsum01234567 = vaddw_s8(vsum01234567, vi5x01234567);
vsum01234567 = vaddw_s8(vsum01234567, vi6x01234567);
int32x4_t vacc0123 = vaddw_s16(vinit_bias, vget_low_s16(vsum01234567));
int32x4_t vacc4567 = vaddw_s16(vinit_bias, vget_high_s16(vsum01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (channels & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (channels & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (channels & 1) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
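// --- Editor's sketch (not part of the generated kernel above) ---
// What the sub-8-channel tail above does, written with scalar stores instead
// of vst1_lane_* plus vext_s8 rotations: emit 4, then 2, then 1 byte of the
// 8-byte result, depending on the low bits of `channels`.  The helper name is
// editorial.
static void store_s8_tail_sketch(int8_t* output, const int8_t result[8], size_t channels)
{
  size_t pos = 0;
  if (channels & 4) {
    output[0] = result[pos + 0]; output[1] = result[pos + 1];
    output[2] = result[pos + 2]; output[3] = result[pos + 3];
    output += 4; pos += 4;
  }
  if (channels & 2) {
    output[0] = result[pos + 0]; output[1] = result[pos + 1];
    output += 2; pos += 2;
  }
  if (channels & 1) {
    output[0] = result[pos];
  }
}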
| 6,253 | 36.674699 | 97 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16-minmax-fp32-neon-mlal-lane.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc0x89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x89AB, vmagic_bias));
vacc0xCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpacc0xCDEF, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc0x89AB = vqsubq_s32(vacc0x89AB, vmagic_bias_less_output_zero_point);
vacc0xCDEF = vqsubq_s32(vacc0xCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
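// --- Editor's sketch (not part of the generated kernel above) ---
// The fp32_neon variant rounds its scaled float accumulators with the
// "magic bias" trick instead of a float-to-int conversion: adding
// 12582912.0f (0x1.8p+23f) to a value in the int8 output range leaves the
// rounded integer in the low mantissa bits, so reinterpreting the float as
// int32 and subtracting magic_bias_less_output_zero_point (the bias bit
// pattern minus the output zero point) yields the zero-point-adjusted value.
// The helper below is an editorial model; it drops the saturating subtract
// (vqsubq_s32) that the kernel uses.
static inline int32_t magic_bias_round_sketch(
    float scaled_acc, int32_t magic_bias_less_output_zero_point)
{
  union { float f; int32_t i; } u;
  u.f = scaled_acc + 12582912.0f;  // the magic bias loaded from params
  return u.i - magic_bias_less_output_zero_point;
}
// The rounded values are then narrowed and clamped with the same
// vqmovn / vmax / vmin sequence used by the other variants.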
| 16,074 | 52.942953 | 125 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16-minmax-fp32-neonv8-mlal-lane.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_gemm_minmax_fp32_ukernel_1x16__neonv8_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
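// --- Editor's sketch (not part of the generated kernel above) ---
// The neonv8 variant differs from fp32_neon only in the rounding step: it
// uses the ARMv8 round-to-nearest-even conversion (vcvtnq_s32_f32) and then
// adds the output zero point with saturating int16 arithmetic.  A rough
// per-lane model follows; lrintf() matches vcvtnq only under the default
// round-to-nearest rounding mode, and the clamp order is simplified, so this
// is illustrative rather than bit-exact in every corner case.
#include <math.h>  // lrintf
static inline int8_t fp32_neonv8_requantize_sketch(
    int32_t acc, float scale,
    int16_t output_zero_point, int8_t output_min, int8_t output_max)
{
  const int32_t rounded = (int32_t) lrintf((float) acc * scale);  // vcvtnq_s32_f32
  int32_t out = rounded + (int32_t) output_zero_point;            // vqaddq_s16
  if (out < (int32_t) output_min) out = (int32_t) output_min;     // vmaxq_s8
  if (out > (int32_t) output_max) out = (int32_t) output_max;     // vminq_s8
  return (int8_t) out;
}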
| 15,854 | 52.383838 | 112 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16-minmax-rndnu-neon-mlal-lane-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
xnn_prefetch_to_l1((const int8_t*) w + 512);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
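    // Requantize with the rndnu scheme: saturating pre-shift (vqshlq_s32), fixed-point multiply returning the high half (vqdmulhq_s32), then rounding post-shift (vrshlq_s32).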
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
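    // Narrow the int32 accumulators to int16, add the output zero point with saturation, then narrow to int8 with saturation.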
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
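    // Store a full 16-column strip when possible; the tail path below writes 8/4/2/1 elements for the final partial strip.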
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,210 | 52.678808 | 112 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16-minmax-rndnu-neon-mlal-lane.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
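  // Each pass of this loop computes a 1x16 strip of output: a single row, 16 columns.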
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t k = kc;
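    // Main loop: 8 K bytes per iteration; A and B are widened to int16 and each A lane is multiply-accumulated against 16 B weights with vmlal_lane_s16.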
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc7 = vmovl_s8(vb89ABCDEFc7);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
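    // Remainder of 1-7 K bytes: column c0 is always applied, and the nested branches apply c1..c6 only when enough K bytes remain.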
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc0 = vmovl_s8(vb89ABCDEFc0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc1 = vmovl_s8(vb89ABCDEFc1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc2 = vmovl_s8(vb89ABCDEFc2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc3 = vmovl_s8(vb89ABCDEFc3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc4 = vmovl_s8(vb89ABCDEFc4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc5 = vmovl_s8(vb89ABCDEFc5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb89ABCDEFc6 = vmovl_s8(vb89ABCDEFc6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x89AB = vmlal_lane_s16(vacc0x89AB, vget_low_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
vacc0xCDEF = vmlal_lane_s16(vacc0xCDEF, vget_high_s16(vxb89ABCDEFc6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 16,073 | 52.759197 | 112 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16-minmax-rndnu-neon-mull-addw-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/neon-mull-addw-dup.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/gemm.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16__neon_mull_addw_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
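    // Main loop: for each of the 8 K bytes, broadcast one A byte (vdup_lane_s8), multiply it against 16 B weights (vmull_s8), and widen-add the int16 products into the int32 accumulators (vaddw_s16).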
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc0 = vmull_s8(vb89ABCDEFc0, vdup_lane_s8(va0, 0));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc0));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc0));
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc1 = vmull_s8(vb89ABCDEFc1, vdup_lane_s8(va0, 1));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc1));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc1));
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc2 = vmull_s8(vb89ABCDEFc2, vdup_lane_s8(va0, 2));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc2));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc2));
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc3 = vmull_s8(vb89ABCDEFc3, vdup_lane_s8(va0, 3));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc3));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc3));
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc4 = vmull_s8(vb89ABCDEFc4, vdup_lane_s8(va0, 4));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc4));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc4));
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc5 = vmull_s8(vb89ABCDEFc5, vdup_lane_s8(va0, 5));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc5));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc5));
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc6 = vmull_s8(vb89ABCDEFc6, vdup_lane_s8(va0, 6));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc6));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc6));
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va0, 7));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c7));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c7));
const int8x8_t vb89ABCDEFc7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x89ABCDEFc7 = vmull_s8(vb89ABCDEFc7, vdup_lane_s8(va0, 7));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc7));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc7));
k -= 8 * sizeof(int8_t);
}
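    // Remainder of 1-7 K bytes, handled with the same mull/addw/dup pattern guarded by nested checks on k.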
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
const int16x8_t vprod0x89ABCDEFc0 = vmull_s8(vb89ABCDEFc0, vdup_lane_s8(va0, 0));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc0));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc0));
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
const int16x8_t vprod0x89ABCDEFc1 = vmull_s8(vb89ABCDEFc1, vdup_lane_s8(va0, 1));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc1));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc1));
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
const int16x8_t vprod0x89ABCDEFc2 = vmull_s8(vb89ABCDEFc2, vdup_lane_s8(va0, 2));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc2));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc2));
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
const int16x8_t vprod0x89ABCDEFc3 = vmull_s8(vb89ABCDEFc3, vdup_lane_s8(va0, 3));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc3));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc3));
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
const int16x8_t vprod0x89ABCDEFc4 = vmull_s8(vb89ABCDEFc4, vdup_lane_s8(va0, 4));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc4));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc4));
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
const int16x8_t vprod0x89ABCDEFc5 = vmull_s8(vb89ABCDEFc5, vdup_lane_s8(va0, 5));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc5));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc5));
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABCDEFc6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
const int16x8_t vprod0x89ABCDEFc6 = vmull_s8(vb89ABCDEFc6, vdup_lane_s8(va0, 6));
vacc0x89AB = vaddw_s16(vacc0x89AB, vget_low_s16(vprod0x89ABCDEFc6));
vacc0xCDEF = vaddw_s16(vacc0xCDEF, vget_high_s16(vprod0x89ABCDEFc6));
}
}
}
}
}
}
}
// Post-accumulation work
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
      // Main case where the 16 columns fit in the destination.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
// Advance to the next 16 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,836 | 53.051195 | 130 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c16-minmax-rndnu-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c16-neon-mlal.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c16__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 16 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
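    // One int32x4 accumulator per output column; each starts from that column's bias loaded into lane 0 of an otherwise zero vector.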
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x8 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x9 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x10 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x11 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x12 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x13 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x14 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x15 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
// KC loop of 16
size_t k = kc;
while (k != 0) {
const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb8 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb9 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb10 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb11 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb12 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb13 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb14 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
const int8x16_t vb15 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
int16x8_t vprod0x8 = vmull_s8(vget_low_s8(vb8), vget_low_s8(va0));
vprod0x8 = vmlal_s8(vprod0x8, vget_high_s8(vb8), vget_high_s8(va0));
vacc0x8 = vpadalq_s16(vacc0x8, vprod0x8);
int16x8_t vprod0x9 = vmull_s8(vget_low_s8(vb9), vget_low_s8(va0));
vprod0x9 = vmlal_s8(vprod0x9, vget_high_s8(vb9), vget_high_s8(va0));
vacc0x9 = vpadalq_s16(vacc0x9, vprod0x9);
int16x8_t vprod0x10 = vmull_s8(vget_low_s8(vb10), vget_low_s8(va0));
vprod0x10 = vmlal_s8(vprod0x10, vget_high_s8(vb10), vget_high_s8(va0));
vacc0x10 = vpadalq_s16(vacc0x10, vprod0x10);
int16x8_t vprod0x11 = vmull_s8(vget_low_s8(vb11), vget_low_s8(va0));
vprod0x11 = vmlal_s8(vprod0x11, vget_high_s8(vb11), vget_high_s8(va0));
vacc0x11 = vpadalq_s16(vacc0x11, vprod0x11);
int16x8_t vprod0x12 = vmull_s8(vget_low_s8(vb12), vget_low_s8(va0));
vprod0x12 = vmlal_s8(vprod0x12, vget_high_s8(vb12), vget_high_s8(va0));
vacc0x12 = vpadalq_s16(vacc0x12, vprod0x12);
int16x8_t vprod0x13 = vmull_s8(vget_low_s8(vb13), vget_low_s8(va0));
vprod0x13 = vmlal_s8(vprod0x13, vget_high_s8(vb13), vget_high_s8(va0));
vacc0x13 = vpadalq_s16(vacc0x13, vprod0x13);
int16x8_t vprod0x14 = vmull_s8(vget_low_s8(vb14), vget_low_s8(va0));
vprod0x14 = vmlal_s8(vprod0x14, vget_high_s8(vb14), vget_high_s8(va0));
vacc0x14 = vpadalq_s16(vacc0x14, vprod0x14);
int16x8_t vprod0x15 = vmull_s8(vget_low_s8(vb15), vget_low_s8(va0));
vprod0x15 = vmlal_s8(vprod0x15, vget_high_s8(vb15), vget_high_s8(va0));
vacc0x15 = vpadalq_s16(vacc0x15, vprod0x15);
k -= 16 * sizeof(int8_t);
}
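    // Reduce the 16 per-column accumulators: horizontal pairwise adds collapse each int32x4 to a single column sum, packed into four int32x4 vectors.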
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
const int32x4_t vsum0x89 = vpaddq_s32(vacc0x8, vacc0x9);
const int32x4_t vsum0xAB = vpaddq_s32(vacc0x10, vacc0x11);
const int32x4_t vsum0xCD = vpaddq_s32(vacc0x12, vacc0x13);
const int32x4_t vsum0xEF = vpaddq_s32(vacc0x14, vacc0x15);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vsum0x89, vsum0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vsum0xCD, vsum0xEF);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vpsum0x8 = vadd_s32(vget_low_s32(vacc0x8), vget_high_s32(vacc0x8));
const int32x2_t vpsum0x9 = vadd_s32(vget_low_s32(vacc0x9), vget_high_s32(vacc0x9));
const int32x2_t vpsum0xA = vadd_s32(vget_low_s32(vacc0x10), vget_high_s32(vacc0x10));
const int32x2_t vpsum0xB = vadd_s32(vget_low_s32(vacc0x11), vget_high_s32(vacc0x11));
const int32x2_t vsum0x89 = vpadd_s32(vpsum0x8, vpsum0x9);
const int32x2_t vsum0xAB = vpadd_s32(vpsum0xA, vpsum0xB);
    int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vpsum0xC = vadd_s32(vget_low_s32(vacc0x12), vget_high_s32(vacc0x12));
const int32x2_t vpsum0xD = vadd_s32(vget_low_s32(vacc0x13), vget_high_s32(vacc0x13));
const int32x2_t vpsum0xE = vadd_s32(vget_low_s32(vacc0x14), vget_high_s32(vacc0x14));
const int32x2_t vpsum0xF = vadd_s32(vget_low_s32(vacc0x15), vget_high_s32(vacc0x15));
const int32x2_t vsum0xCD = vpadd_s32(vpsum0xC, vpsum0xD);
const int32x2_t vsum0xEF = vpadd_s32(vpsum0xE, vpsum0xF);
    int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,773 | 54.765182 | 130 | c |
| XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
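    // Main loop: 16 K bytes per iteration, unrolled 2x; pairs of A bytes are broadcast with vdup_lane_s16, multiplied against the interleaved B pairs (vmull_s8/vmlal_s8), and pairwise-added into the int32 accumulators (vpadalq_s16).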
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
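    // Single (non-unrolled) pass over 8 K bytes when between 8 and 15 bytes remain.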
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
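    // Requantize with the rndnu scheme: saturating pre-shift, doubling high multiply, rounding post-shift.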
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
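    // Narrow the 32-bit accumulators to 16 bits, add the output zero point, then saturate to 8 bits.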
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 19,259 | avg_line_length: 55.315789 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
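    // Main loop: 16 bytes of K per iteration; A is broadcast pairwise with vld1_dup_s16 and accumulated with paired vmull_s8/vmlal_s8.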
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
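    // Up to 8 remaining bytes of K: single vmull_s8 pass without the MLAL half.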
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
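    // Remainder of 2, 4, or 6 bytes (kc is rounded up to a multiple of 2).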
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 19,470 | avg_line_length: 54.472934 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
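    // Main loop (ld2r): vld2_dup_s16 broadcasts two adjacent 16-bit K-pairs of A per load.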
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
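    // Clamp the requantized values to the output range [output_min, output_max].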
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 19,162 | avg_line_length: 54.544928 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mlal-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
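    // Main loop (ld4r): vld4_dup_s16 broadcasts four adjacent 16-bit K-pairs of A in one load.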
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0c0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0c0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0c0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0c0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0c1x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0c1x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0c1x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0c1x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0c2x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0c2x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0c2x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0c2x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0c3x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0c3x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0c3x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0c3x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 18,939 | avg_line_length: 54.380117 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mull-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
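      // Main loop ("dup" variant): 8 k-values per iteration. Each 16-bit pair
      // of activations is broadcast with vdup_lane_s16 and multiplied against
      // the weight vectors for that pair of k-values.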
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
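      // Remainder: kc is rounded up to a multiple of 2, so 2, 4, or 6
      // k-values may be left over here.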
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,989 | avg_line_length: 48.341564 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mull-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
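      // Main loop ("ld1r" variant): each 16-bit pair of activations is loaded
      // and broadcast directly with vld1_dup_s16 instead of a dup-lane shuffle.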
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 12,056 | avg_line_length: 48.012195 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mull-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
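      // Main loop ("ld2r" variant): vld2_dup_s16 loads and broadcasts two
      // 16-bit activation pairs at a time.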
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,956 | avg_line_length: 48.004098 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2-minmax-rndnu-neon-mull-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2__neon_mull_ld4r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
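      // Main loop ("ld4r" variant): vld4_dup_s16 loads and broadcasts all four
      // 16-bit activation pairs of the 8-byte block in one instruction.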
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
const int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3, va0c3);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
const int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3, va0c3);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0, va0c0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
const int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0, va0c0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1, va0c1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
const int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1, va0c1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89ABc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDEFc2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2, va0c2);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
const int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2, va0c2);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
}
}
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,883 | avg_line_length: 47.90535 | max_line_length: 107 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2s4-minmax-rndnu-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
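      // Main loop (c2s4 "shuffle" variant with MLAL): 16 k-values per
      // iteration. The two activation vectors are rotated by 2 bytes with
      // vext_s8 between column groups, and each pair of products is combined
      // with vmull_s8 + vmlal_s8 before being accumulated into int32.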
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
const int8x8_t vb89ABc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89ABc0 = vmlal_s8(vprod0x89ABc0, vb89ABc0x1, va0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
const int8x8_t vbCDEFc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDEFc0 = vmlal_s8(vprod0xCDEFc0, vbCDEFc0x1, va0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
const int8x8_t vb89ABc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89ABc1 = vmlal_s8(vprod0x89ABc1, vb89ABc1x1, va0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
const int8x8_t vbCDEFc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDEFc1 = vmlal_s8(vprod0xCDEFc1, vbCDEFc1x1, va0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
const int8x8_t vb89ABc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89ABc2 = vmlal_s8(vprod0x89ABc2, vb89ABc2x1, va0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
const int8x8_t vbCDEFc2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDEFc2 = vmlal_s8(vprod0xCDEFc2, vbCDEFc2x1, va0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
const int8x8_t vb89ABc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89ABc3 = vmlal_s8(vprod0x89ABc3, vb89ABc3x1, va0x1);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
const int8x8_t vbCDEFc3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDEFc3 = vmlal_s8(vprod0xCDEFc3, vbCDEFc3x1, va0x1);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
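      // Remainder: kc is rounded up to a multiple of 8, so exactly 8 k-values
      // are left when kc is not a multiple of 16.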
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
}
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 13,551 | 47.924188 | 109 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c2s4-minmax-rndnu-neon-mull.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c2s4__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t k = kc;
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
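      // c2s4 shuffle: multiply-accumulate four groups of 2 K elements each, rotating the
      // A vector by 2 bytes (vext) between groups so every group reads its own K pair.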
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0);
int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1);
int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2);
int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0);
vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3);
int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0);
vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3);
k -= 8 * sizeof(int8_t);
} while (k != 0);
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,867 | 41.76087 | 109 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
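    // Each bias pair is loaded as two uint32 values and widened to 64 bits, giving lanes
    // {bias, 0, bias, 0}; the pairwise additions at the end of the kernel sum adjacent
    // lanes, so every bias lands in its own column's total.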
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
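    // If at least 8 bytes of K remain, process one more 8-byte block of A with a single
    // vmull pass (no vmlal second pass).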
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
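    // Reduce the 2-column accumulators to 4-column vectors by pairwise addition;
    // AArch64 has vpaddq_s32, while AArch32 composes it from vpadd_s32 and vcombine_s32.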
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,692 | 54.633929 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
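      // ld1r variant: each 4-byte group of A is broadcast directly from memory with
      // vld1_dup_s32, instead of the vdup_lane shuffle used by the "dup" variant.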
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,727 | 54.244838 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
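      // ld2r variant: vld2_dup_s32 loads and broadcasts both 4-byte groups of A in a
      // single de-interleaving load.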
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0c0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0c0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0c0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0c0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0c0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0c0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0c0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0c0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0c1x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0c1x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0c1x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0c1x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0c1x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0c1x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0c1x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0c1x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
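    // Requantize with the rndnu scheme: saturating pre-shift, doubling multiply returning the high half, then a rounding post-shift.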
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
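    // Narrow to 16 bits with saturation, add the output zero point, then narrow to 8 bits.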
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_dup(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
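    // Load the 16 per-column biases; each int32x4 accumulator covers two columns, with the two 32-bit biases zero-extended into its even lanes.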
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
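    // Main loop: 8 activation bytes per iteration, split into two 4-byte groups that are broadcast with vdup_lane_s32 (the "dup" variant).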
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
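    // Remainder: kc is rounded up to a multiple of 4, so at most one 4-byte group is left to process here.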
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mull-ld1r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld1r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
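    // The "ld1r" variant loads and broadcasts each 4-byte activation group directly with vld1_dup_s32.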
while (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neon_mull_ld2r(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t k = kc;
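    // The "ld2r" variant uses vld2_dup_s32 to load both 4-byte activation groups at once, broadcasting each across its own vector.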
while (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
const int16x8_t vprod0x89c1 = vmull_s8(vb89c1, va0c1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
const int16x8_t vprod0xABc1 = vmull_s8(vbABc1, va0c1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
const int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1, va0c1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
const int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1, va0c1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb89c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbABc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbCDc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vbEFc0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int16x8_t vprod0x89c0 = vmull_s8(vb89c0, va0c0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
const int16x8_t vprod0xABc0 = vmull_s8(vbABc0, va0c0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
const int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0, va0c0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
const int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0, va0c0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
const int8_t* a0 = a;
int8_t* c0 = c;
// Loop over groups of 16 columns.
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
// weight matrix, at the start of the group of 16 columns.
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
    // Inner accumulation loop along the K dimension, computing all 16 columns.
size_t k = kc;
    // 2x partially unrolled loop that loads 8 bytes of activations at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
      // Load an 8x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
    // Handle up to 4 final positions of `k`.
if XNN_UNLIKELY(k != 0) {
      // Load a 1x4 block of activations (an 8-byte load is issued; only the low 4 bytes are used, which XNN_OOB_READS permits).
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 4;
// Load a 4x16 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x16 --> 1x16.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
}
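    // Requantize the dot-product accumulators (rndnu: pre-shift, fixed-point multiply, rounding post-shift).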
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
      // Main case where the 16 columns fit in the destination.
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
// Advance to the next 16 columns.
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4s2-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
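    // Main loop: 16 activation bytes per iteration; vmull_s8/vmlal_s8 pairs fuse two 8-byte passes before widening into the int32 accumulators.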
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
const int8x8_t vb89c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89c0 = vmlal_s8(vprod0x89c0, vb89c0x1, va0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
const int8x8_t vbABc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xABc0 = vmlal_s8(vprod0xABc0, vbABc0x1, va0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
const int8x8_t vbCDc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDc0 = vmlal_s8(vprod0xCDc0, vbCDc0x1, va0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
const int8x8_t vbEFc0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xEFc0 = vmlal_s8(vprod0xEFc0, vbEFc0x1, va0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
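      // Rotate each activation vector by 4 bytes before the second (c1) pass over the shuffled weights.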
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
const int8x8_t vb89c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x89c1 = vmlal_s8(vprod0x89c1, vb89c1x1, va0x1);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
const int8x8_t vbABc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xABc1 = vmlal_s8(vprod0xABc1, vbABc1x1, va0x1);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
const int8x8_t vbCDc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xCDc1 = vmlal_s8(vprod0xCDc1, vbCDc1x1, va0x1);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
const int8x8_t vbEFc1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0xEFc1 = vmlal_s8(vprod0xEFc1, vbEFc1x1, va0x1);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 16 * sizeof(int8_t);
}
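    // Remainder: kc is padded to a multiple of 8 above, so at most one 8-byte group remains and takes a single vmull_s8 pass.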
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
}
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,532
| 48.264407
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-gemm/gen/qs8-gemm-1x16c4s2-minmax-rndnu-neon-mull.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-gemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
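
// QS8 GEMM microkernel: computes 1 row by 16 columns of int8 output per call,
// multiplying 8-bit activations and packed weights with NEON vmull_s8 and
// requantizing the int32 accumulators with the rndnu scheme.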
void xnn_qs8_gemm_minmax_rndnu_ukernel_1x16c4s2__neon_mull(
size_t mr,
size_t nc,
size_t kc,
const int8_t* restrict a,
size_t a_stride,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(kc % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
const int8_t* a0 = a;
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
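    // Initialize the accumulators with the bias values from the packed weights:
    // each int32x4 holds the biases for 2 output columns in its even lanes; the
    // odd lanes start at zero and are folded in by the final pairwise additions.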
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x89 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xAB = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xCD = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0xEF = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t k = kc;
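    // Main loop: consume 8 bytes of K per iteration, split into two 4-byte
    // shuffle positions (c0 and c1) to match the c4s2 weight packing.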
do {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb89c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbCDc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vbEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
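      // Multiply-accumulate the first shuffle position (c0): widening 8-bit
      // multiplies, then pairwise accumulation of the 16-bit products into int32.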
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
int16x8_t vprod0x89c0 = vmull_s8(vb89c0x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c0);
int16x8_t vprod0xABc0 = vmull_s8(vbABc0x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc0);
int16x8_t vprod0xCDc0 = vmull_s8(vbCDc0x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc0);
int16x8_t vprod0xEFc0 = vmull_s8(vbEFc0x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc0);
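      // Rotate the activation vector by 4 bytes and accumulate the second shuffle position (c1).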
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
int16x8_t vprod0x89c1 = vmull_s8(vb89c1x0, va0x0);
vacc0x89 = vpadalq_s16(vacc0x89, vprod0x89c1);
int16x8_t vprod0xABc1 = vmull_s8(vbABc1x0, va0x0);
vacc0xAB = vpadalq_s16(vacc0xAB, vprod0xABc1);
int16x8_t vprod0xCDc1 = vmull_s8(vbCDc1x0, va0x0);
vacc0xCD = vpadalq_s16(vacc0xCD, vprod0xCDc1);
int16x8_t vprod0xEFc1 = vmull_s8(vbEFc1x0, va0x0);
vacc0xEF = vpadalq_s16(vacc0xEF, vprod0xEFc1);
k -= 8 * sizeof(int8_t);
} while (k != 0);
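    // Pairwise-reduce the 2-column accumulators into four int32x4 vectors
    // covering output columns 0-3, 4-7, 8-11 and 12-15.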
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
int32x4_t vacc0x89AB = vpaddq_s32(vacc0x89, vacc0xAB);
int32x4_t vacc0xCDEF = vpaddq_s32(vacc0xCD, vacc0xEF);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
const int32x2_t vsum0x89 = vpadd_s32(vget_low_s32(vacc0x89), vget_high_s32(vacc0x89));
const int32x2_t vsum0xAB = vpadd_s32(vget_low_s32(vacc0xAB), vget_high_s32(vacc0xAB));
int32x4_t vacc0x89AB = vcombine_s32(vsum0x89, vsum0xAB);
const int32x2_t vsum0xCD = vpadd_s32(vget_low_s32(vacc0xCD), vget_high_s32(vacc0xCD));
const int32x2_t vsum0xEF = vpadd_s32(vget_low_s32(vacc0xEF), vget_high_s32(vacc0xEF));
int32x4_t vacc0xCDEF = vcombine_s32(vsum0xCD, vsum0xEF);
#endif
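    // Requantize with the rndnu scheme: saturating pre-shift, fixed-point
    // doubling multiply-high, then rounding post-shift.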
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
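    // Narrow to 16 bits with saturation, add the output zero point, then narrow to 8 bits.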
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point);
int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
#endif
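    // Clamp the 8-bit results to the requested output range.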
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);
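    // Store a full tile of 16 output columns, or fall through to the 8/4/2/1-column tail.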
if (nc >= 16) {
vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a0 = (const int8_t*) ((uintptr_t) a0 - kc);
nc -= 16;
} else {
// Final case where not all of the 16 columns fit in the destination.
int8x8_t vout0x01234567 = vget_low_s8(vout0x0123456789ABCDEF);
if (nc & 8) {
vst1_s8(c0, vout0x01234567); c0 += 8;
vout0x01234567 = vget_high_s8(vout0x0123456789ABCDEF);
}
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,328
| 44.286408
| 107
|
c
|