| repo (string, 1-152 chars, may be null ⌀) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__neon_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(&params->neon.input_zero_point);
const int16x8_t vmultiplier = vld1q_dup_s16(&params->neon.multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
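// Requantize y = output_zero_point + multiplier * (input_zero_point - x):
// widen the difference to 16 bits, shift left by 7 to place it in Q15, apply
// the saturating rounding doubling multiply-high (VQRDMULH) with the Q15
// multiplier, then add the output zero point and narrow back to int8,
// saturating at every step.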
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
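// Handle the 1-7 byte remainder: store progressively narrower lanes and
// rotate the vector down with VEXT so the next bytes land in lane 0.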
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 2,073
| 30.907692
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__scalar_x1(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
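// Scalar fixed-point rescale: y = clamp((bias + x * multiplier) >> 8, -128, 127).
// The precomputed bias carries the output zero point (scaled by 2^8), the
// rounding term, and the input zero point's contribution.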
do {
int32_t vacc = *input++;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,017
| 23.829268
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__scalar_x2(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
int32_t vacc0 = input[0];
int32_t vacc1 = input[1];
input += 2;
vacc0 = vbias + vacc0 * vmultiplier;
vacc1 = vbias + vacc1 * vmultiplier;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = *input;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output = (int8_t) vout;
}
}
| 1,547
| 24.8
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__scalar_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
int32_t vacc0 = input[0];
int32_t vacc1 = input[1];
int32_t vacc2 = input[2];
int32_t vacc3 = input[3];
input += 4;
vacc0 = vbias + vacc0 * vmultiplier;
vacc1 = vbias + vacc1 * vmultiplier;
vacc2 = vbias + vacc2 * vmultiplier;
vacc3 = vbias + vacc3 * vmultiplier;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout2 = math_max_s32(vout2, -128);
vout3 = math_max_s32(vout3, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
vout2 = math_min_s32(vout2, 127);
vout3 = math_min_s32(vout3, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = *input++;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 2,078
| 26.355263
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-sse2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vcvt_ukernel__sse2_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
input += 16;
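// SSE2 has no pmovsxbw, so sign-extend int8 to int16 by comparing against
// zero to get a mask of sign bytes and interleaving the input with that mask.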
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);
const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
const __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier);
const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
const __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier);
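// Pair the low and high 16-bit halves of each 16x16-bit product to assemble
// the full 32-bit products multiplier * x.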
__m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
__m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
__m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
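// Compute (bias - product) >> 8 in 32 bits, then pack down to int8 with
// signed saturation; bias pre-folds the output zero point (scaled by 2^8),
// rounding, and the input zero point term.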
vacc0 = _mm_sub_epi32(vbias, vacc0);
vacc1 = _mm_sub_epi32(vbias, vacc1);
vacc2 = _mm_sub_epi32(vbias, vacc2);
vacc3 = _mm_sub_epi32(vbias, vacc3);
vacc0 = _mm_srai_epi32(vacc0, 8);
vacc1 = _mm_srai_epi32(vacc1, 8);
vacc2 = _mm_srai_epi32(vacc2, 8);
vacc3 = _mm_srai_epi32(vacc3, 8);
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc1 = _mm_packs_epi32(vacc2, vacc3);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
vacc_hh = _mm_sub_epi32(vbias, vacc_hh);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
vacc_hh = _mm_sub_epi32(vbias, vacc_hh);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 5,757
| 35.443038
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-sse2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vcvt_ukernel__sse2_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);
const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
const __m128i vextx2 = _mm_unpacklo_epi8(vx1, vm1);
const __m128i vextx3 = _mm_unpackhi_epi8(vx1, vm1);
const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
const __m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier);
const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
const __m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier);
const __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier);
const __m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier);
const __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier);
const __m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier);
__m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
__m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
__m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
__m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);
__m128i vacc5 = _mm_unpackhi_epi16(vprodlo2, vprodhi2);
__m128i vacc6 = _mm_unpacklo_epi16(vprodlo3, vprodhi3);
__m128i vacc7 = _mm_unpackhi_epi16(vprodlo3, vprodhi3);
vacc0 = _mm_sub_epi32(vbias, vacc0);
vacc1 = _mm_sub_epi32(vbias, vacc1);
vacc2 = _mm_sub_epi32(vbias, vacc2);
vacc3 = _mm_sub_epi32(vbias, vacc3);
vacc4 = _mm_sub_epi32(vbias, vacc4);
vacc5 = _mm_sub_epi32(vbias, vacc5);
vacc6 = _mm_sub_epi32(vbias, vacc6);
vacc7 = _mm_sub_epi32(vbias, vacc7);
vacc0 = _mm_srai_epi32(vacc0, 8);
vacc1 = _mm_srai_epi32(vacc1, 8);
vacc2 = _mm_srai_epi32(vacc2, 8);
vacc3 = _mm_srai_epi32(vacc3, 8);
vacc4 = _mm_srai_epi32(vacc4, 8);
vacc5 = _mm_srai_epi32(vacc5, 8);
vacc6 = _mm_srai_epi32(vacc6, 8);
vacc7 = _mm_srai_epi32(vacc7, 8);
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc1 = _mm_packs_epi32(vacc2, vacc3);
vacc2 = _mm_packs_epi32(vacc4, vacc5);
vacc3 = _mm_packs_epi32(vacc6, vacc7);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
vacc_hh = _mm_sub_epi32(vbias, vacc_hh);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vm);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vm);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epi16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epi16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_sub_epi32(vbias, vacc_ll);
vacc_lh = _mm_sub_epi32(vbias, vacc_lh);
vacc_hl = _mm_sub_epi32(vbias, vacc_hl);
vacc_hh = _mm_sub_epi32(vbias, vacc_hh);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 7,025
| 37.604396
| 87
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-sse41-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__sse41_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
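// _mm_mulhrs_epi16 is a Q15 rounding multiply-high, the SSE counterpart of
// NEON's VQRDMULH; the left shift by 7 places the 9-bit zero-point difference
// in Q15 before the multiply.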
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,056
| 31.870968
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-sse41-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__sse41_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,730
| 34.533333
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__sse41_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,280
| 31.126761
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-ssse3-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vcvt_ukernel__ssse3_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
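// Same Q15 rescale as the SSE4.1 kernel, but the int8 inputs are widened with
// the SSE2 compare-and-unpack trick since pmovsxbw is unavailable in SSSE3.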
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vm0);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vm0);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 4,121
| 33.932203
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-ssse3-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vcvt_ukernel__ssse3_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vm0);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vm0);
const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
__m128i vacc2 = _mm_unpacklo_epi8(vx1, vm1);
__m128i vacc3 = _mm_unpackhi_epi8(vx1, vm1);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 4,857
| 35.80303
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmrelaxedsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmrelaxedsimd_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
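// __builtin_wasm_relaxed_q15mulr_s_i16x8 is the relaxed-SIMD Q15 rounding
// multiply: it matches wasm_i16x8_q15mulr_sat except that the overflow case
// (INT16_MIN * INT16_MIN) is implementation-defined, which lets engines lower
// it directly to VQRDMULH or PMULHRSW.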
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
input += 16;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,002
| 31.641304
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmrelaxedsimd-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmrelaxedsimd_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
v128_t vacc2 = wasm_i16x8_load8x8(input + 16);
v128_t vacc3 = wasm_i16x8_load8x8(input + 24);
input += 32;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,653
| 34.134615
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmrelaxedsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmrelaxedsimd_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,247
| 31.114286
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmsimd-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmsimd_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
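// wasm_i16x8_q15mulr_sat is the saturating Q15 rounding multiply-high, the
// portable WebAssembly counterpart of NEON's VQRDMULH.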
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
input += 16;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,931
| 30.869565
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmsimd-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmsimd_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
v128_t vacc2 = wasm_i16x8_load8x8(input + 16);
v128_t vacc3 = wasm_i16x8_load8x8(input + 24);
input += 32;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,550
| 33.144231
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vcvt/gen/qs8-vcvt-wasmsimd-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vcvt_ukernel__wasmsimd_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,208
| 30.557143
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__neon_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(&params->neon.input_zero_point);
const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(&params->neon.input_scale_div_exp);
const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(&params->neon.input_scale_div_mantissa);
const int16x8_t vscale_ratio = vld1q_dup_s16(&params->neon.scale_ratio);
const int16x8_t vhalf = vdupq_n_s16(16384);
const int16x8_t vzero = vdupq_n_s16(0);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
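// Quantized hardswish: hswish(x) = x * clamp(x/6 + 1/2, 0, 1). Both factors
// are computed from the negated Q15 input (input_zero_point - x) << 7, so the
// sign flips cancel in the final Q15 multiply: vin builds the gate by scaling
// with the precomputed input_scale/6 factor (split into a Q15 mantissa and a
// shift exponent), subtracting vhalf = 0.5 in Q15, and clamping against zero,
// while vout is the input rescaled by scale_ratio into the output quantization.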
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
int16x8_t vin0 = vqdmulhq_s16(vacc0, vinput_scale_div_mantissa);
int16x8_t vin1 = vqdmulhq_s16(vacc1, vinput_scale_div_mantissa);
vin0 = vqshlq_s16(vin0, vinput_scale_div_exp);
vin1 = vqshlq_s16(vin1, vinput_scale_div_exp);
vin0 = vqsubq_s16(vin0, vhalf);
vin1 = vqsubq_s16(vin1, vhalf);
vin0 = vminq_s16(vin0, vzero);
vin1 = vminq_s16(vin1, vzero);
int16x8_t vout0 = vqdmulhq_s16(vacc0, vscale_ratio);
int16x8_t vout1 = vqdmulhq_s16(vacc1, vscale_ratio);
vout0 = vqdmulhq_s16(vout0, vin0);
vout1 = vqdmulhq_s16(vout1, vin1);
vout0 = vqaddq_s16(vout0, voutput_zero_point);
vout1 = vqaddq_s16(vout1, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vout0), vqmovn_s16(vout1));
vst1q_s8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vout);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vout);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 3,961
| 37.843137
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__neon_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(¶ms->neon.input_zero_point);
const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(¶ms->neon.input_scale_div_exp);
const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(¶ms->neon.input_scale_div_mantissa);
const int16x8_t vscale_ratio = vld1q_dup_s16(¶ms->neon.scale_ratio);
const int16x8_t vhalf = vdupq_n_s16(16384);
const int16x8_t vzero = vdupq_n_s16(0);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
const int8x16_t vx1 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
int16x8_t vacc2 = vsubw_s8(vinput_zero_point, vget_low_s8(vx1));
int16x8_t vacc3 = vsubw_s8(vinput_zero_point, vget_high_s8(vx1));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
int16x8_t vin0 = vqdmulhq_s16(vacc0, vinput_scale_div_mantissa);
int16x8_t vin1 = vqdmulhq_s16(vacc1, vinput_scale_div_mantissa);
int16x8_t vin2 = vqdmulhq_s16(vacc2, vinput_scale_div_mantissa);
int16x8_t vin3 = vqdmulhq_s16(vacc3, vinput_scale_div_mantissa);
vin0 = vqshlq_s16(vin0, vinput_scale_div_exp);
vin1 = vqshlq_s16(vin1, vinput_scale_div_exp);
vin2 = vqshlq_s16(vin2, vinput_scale_div_exp);
vin3 = vqshlq_s16(vin3, vinput_scale_div_exp);
vin0 = vqsubq_s16(vin0, vhalf);
vin1 = vqsubq_s16(vin1, vhalf);
vin2 = vqsubq_s16(vin2, vhalf);
vin3 = vqsubq_s16(vin3, vhalf);
vin0 = vminq_s16(vin0, vzero);
vin1 = vminq_s16(vin1, vzero);
vin2 = vminq_s16(vin2, vzero);
vin3 = vminq_s16(vin3, vzero);
int16x8_t vout0 = vqdmulhq_s16(vacc0, vscale_ratio);
int16x8_t vout1 = vqdmulhq_s16(vacc1, vscale_ratio);
int16x8_t vout2 = vqdmulhq_s16(vacc2, vscale_ratio);
int16x8_t vout3 = vqdmulhq_s16(vacc3, vscale_ratio);
vout0 = vqdmulhq_s16(vout0, vin0);
vout1 = vqdmulhq_s16(vout1, vin1);
vout2 = vqdmulhq_s16(vout2, vin2);
vout3 = vqdmulhq_s16(vout3, vin3);
vout0 = vqaddq_s16(vout0, voutput_zero_point);
vout1 = vqaddq_s16(vout1, voutput_zero_point);
vout2 = vqaddq_s16(vout2, voutput_zero_point);
vout3 = vqaddq_s16(vout3, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vout0), vqmovn_s16(vout1));
const int8x16_t vy1 = vcombine_s8(vqmovn_s16(vout2), vqmovn_s16(vout3));
vst1q_s8(output, vy0); output += 16;
vst1q_s8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vout);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vout);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 5,020
| 39.821138
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__neon_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(¶ms->neon.input_zero_point);
const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(¶ms->neon.input_scale_div_exp);
const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(¶ms->neon.input_scale_div_mantissa);
const int16x8_t vscale_ratio = vld1q_dup_s16(¶ms->neon.scale_ratio);
const int16x8_t vhalf = vdupq_n_s16(16384);
const int16x8_t vzero = vdupq_n_s16(0);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vout);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vout);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 2,826
| 34.78481
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__scalar_x1(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
do {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
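    // vinput_scale_div_exp may be negative, and shifting by a negative count is
    // undefined in C, hence the explicit branch between << and >>.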
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
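    // 16384 is 0.5 in Q15; after this subtract and the two clamps below, vin is
    // a non-positive Q15 gate confined to [-32768, 0].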
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,660
| 30.339623
| 84
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__scalar_x2(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) ((vinput_zero_point - (uint32_t) input[0]) << 7);
int32_t vacc1 = (int32_t) ((vinput_zero_point - (uint32_t) input[1]) << 7);
input += 2;
int32_t vin0 = vacc0 * vinput_scale_div_mantissa;
int32_t vin1 = vacc1 * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin0 <<= vinput_scale_div_exp;
vin1 <<= vinput_scale_div_exp;
} else {
vin0 >>= -vinput_scale_div_exp;
vin1 >>= -vinput_scale_div_exp;
}
vin0 -= 16384;
vin1 -= 16384;
vin0 = math_min_s32(vin0, 0);
vin1 = math_min_s32(vin1, 0);
vin0 = math_max_s32(vin0, -32768);
vin1 = math_max_s32(vin1, -32768);
int32_t vout0 = math_asr_s32(vacc0 * vscale_ratio, 15);
int32_t vout1 = math_asr_s32(vacc1 * vscale_ratio, 15);
vout0 = math_asr_s32(vin0 * vout0, 15) + voutput_zero_point;
vout1 = math_asr_s32(vin1 * vout1, 15) + voutput_zero_point;
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
}
}
| 2,876
| 30.271739
| 84
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vhswish/gen/qs8-vhswish-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qs8_vhswish_ukernel__scalar_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) ((vinput_zero_point - (uint32_t) input[0]) << 7);
int32_t vacc1 = (int32_t) ((vinput_zero_point - (uint32_t) input[1]) << 7);
int32_t vacc2 = (int32_t) ((vinput_zero_point - (uint32_t) input[2]) << 7);
int32_t vacc3 = (int32_t) ((vinput_zero_point - (uint32_t) input[3]) << 7);
input += 4;
int32_t vin0 = vacc0 * vinput_scale_div_mantissa;
int32_t vin1 = vacc1 * vinput_scale_div_mantissa;
int32_t vin2 = vacc2 * vinput_scale_div_mantissa;
int32_t vin3 = vacc3 * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin0 <<= vinput_scale_div_exp;
vin1 <<= vinput_scale_div_exp;
vin2 <<= vinput_scale_div_exp;
vin3 <<= vinput_scale_div_exp;
} else {
vin0 >>= -vinput_scale_div_exp;
vin1 >>= -vinput_scale_div_exp;
vin2 >>= -vinput_scale_div_exp;
vin3 >>= -vinput_scale_div_exp;
}
vin0 -= 16384;
vin1 -= 16384;
vin2 -= 16384;
vin3 -= 16384;
vin0 = math_min_s32(vin0, 0);
vin1 = math_min_s32(vin1, 0);
vin2 = math_min_s32(vin2, 0);
vin3 = math_min_s32(vin3, 0);
vin0 = math_max_s32(vin0, -32768);
vin1 = math_max_s32(vin1, -32768);
vin2 = math_max_s32(vin2, -32768);
vin3 = math_max_s32(vin3, -32768);
int32_t vout0 = math_asr_s32(vacc0 * vscale_ratio, 15);
int32_t vout1 = math_asr_s32(vacc1 * vscale_ratio, 15);
int32_t vout2 = math_asr_s32(vacc2 * vscale_ratio, 15);
int32_t vout3 = math_asr_s32(vacc3 * vscale_ratio, 15);
vout0 = math_asr_s32(vin0 * vout0, 15) + voutput_zero_point;
vout1 = math_asr_s32(vin1 * vout1, 15) + voutput_zero_point;
vout2 = math_asr_s32(vin2 * vout2, 15) + voutput_zero_point;
vout3 = math_asr_s32(vin3 * vout3, 15) + voutput_zero_point;
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout2 = math_max_s32(vout2, -128);
vout3 = math_max_s32(vout3, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
vout2 = math_min_s32(vout2, 127);
vout3 = math_min_s32(vout3, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 4,043
| 32.7
| 86
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-armsimd32-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__armsimd32_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x2_t vinput_zero_point = (int16x2_t) params->armsimd32.input_zero_point;
const int16x2_t vpositive_multiplier = (int16x2_t) params->armsimd32.positive_multiplier;
const int16x2_t vnegative_multiplier = (int16x2_t) params->armsimd32.negative_multiplier;
const int32_t vbias = params->armsimd32.bias;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
input += 4;
int16x2_t vx02 = __sxtb16(vx0123);
int16x2_t vx13 = __sxtb16(__ror(vx0123, 8));
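    // __ssub16 updates the APSR.GE flags per 16-bit lane; each following __sel
    // keeps the negative multiplier for lanes where (zero_point - x) >= 0 and
    // substitutes the positive multiplier otherwise.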
vx02 = __ssub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __ssub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 8), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 8), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
int16x2_t vx02 = __sxtb16(vx0123);
int16x2_t vx13 = __sxtb16(__ror(vx0123, 8));
vx02 = __ssub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __ssub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
const int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
if (batch & (2 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
vacc0 = __ssat(math_asr_s32(vacc2, 8), 8);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
}
}
}
| 3,300
| 34.880435
| 122
|
c
|
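Aside (not part of the dump): every qs8 LeakyReLU kernel in this family computes the same dequantize, scale-by-sign, requantize mapping. A minimal float model for cross-checking; an illustrative sketch with assumed names, not an XNNPACK API:

// Float model of qs8 LeakyReLU: y = x if x >= 0 else alpha * x, evaluated on
// dequantized values, then requantized and saturated to int8.
#include <math.h>
#include <stdint.h>

int8_t qs8_lrelu_ref(int8_t q, float in_scale, int8_t in_zp, float alpha,
                     float out_scale, int8_t out_zp) {
  const float x = in_scale * (float) ((int32_t) q - (int32_t) in_zp);
  const float y = x >= 0.0f ? x : alpha * x;
  const float r = rintf(y / out_scale) + (float) out_zp;
  return (int8_t) fminf(fmaxf(r, -128.0f), 127.0f);
}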
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-armsimd32-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__armsimd32_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x2_t vinput_zero_point = (int16x2_t) params->armsimd32.input_zero_point;
const int16x2_t vpositive_multiplier = (int16x2_t) params->armsimd32.positive_multiplier;
const int16x2_t vnegative_multiplier = (int16x2_t) params->armsimd32.negative_multiplier;
const int32_t vbias = params->armsimd32.bias;
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_indexed_load_u32(input, 0);
const int8x4_t vx4567 = (int8x4_t) unaligned_indexed_load_u32(input, 1);
input += 8;
int16x2_t vx02 = __sxtb16(vx0123);
int16x2_t vx13 = __sxtb16(__ror(vx0123, 8));
int16x2_t vx46 = __sxtb16(vx4567);
int16x2_t vx57 = __sxtb16(__ror(vx4567, 8));
vx02 = __ssub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __ssub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx46 = __ssub16(vinput_zero_point, vx46);
const int16x2_t vmultiplier46 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx57 = __ssub16(vinput_zero_point, vx57);
const int16x2_t vmultiplier57 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
int32_t vacc4 = __smlabb(vmultiplier46, vx46, vbias);
int32_t vacc5 = __smlabb(vmultiplier57, vx57, vbias);
int32_t vacc6 = __smlatt(vmultiplier46, vx46, vbias);
int32_t vacc7 = __smlatt(vmultiplier57, vx57, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 8), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 8), 8);
vacc4 = __ssat(math_asr_s32(vacc4, 8), 8);
vacc5 = __ssat(math_asr_s32(vacc5, 8), 8);
vacc6 = __ssat(math_asr_s32(vacc6, 8), 8);
vacc7 = __ssat(math_asr_s32(vacc7, 8), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output[4] = (int8_t) vacc4;
output[5] = (int8_t) vacc5;
output[6] = (int8_t) vacc6;
output[7] = (int8_t) vacc7;
output += 8;
}
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
input += 4;
int16x2_t vx02 = __sxtb16(vx0123);
int16x2_t vx13 = __sxtb16(__ror(vx0123, 8));
vx02 = __ssub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __ssub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
vacc2 = __ssat(math_asr_s32(vacc2, 8), 8);
vacc3 = __ssat(math_asr_s32(vacc3, 8), 8);
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
output[2] = (int8_t) vacc2;
output[3] = (int8_t) vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const int8x4_t vx0123 = (int8x4_t) unaligned_load_u32(input);
int16x2_t vx02 = __sxtb16(vx0123);
int16x2_t vx13 = __sxtb16(__ror(vx0123, 8));
vx02 = __ssub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __ssub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
const int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
vacc0 = __ssat(math_asr_s32(vacc0, 8), 8);
vacc1 = __ssat(math_asr_s32(vacc1, 8), 8);
if (batch & (2 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
output[1] = (int8_t) vacc1;
vacc0 = __ssat(math_asr_s32(vacc2, 8), 8);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
output[0] = (int8_t) vacc0;
}
}
}
| 5,513
| 38.669065
| 122
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
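  // Selection is branchless: _mm_cmpgt_epi16 against the input zero point yields
  // an all-ones/all-zeros mask per lane, _mm_blendv_epi8 picks the per-sign
  // multiplier, and _mm_mulhrs_epi16 applies it as a rounding Q15 multiply to
  // the <<7 input.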
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vmultiplier0 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier1 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,829
| 36.54902
| 104
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier1 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier2 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier2);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier3 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier3);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,833
| 39.966102
| 104
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,723
| 34.842105
| 104
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx2-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx2_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,119
| 36.590361
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx2_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m256i vacc0 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
input += 32;
__m256i vmultiplier0 = _mm256_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
__m256i vmultiplier1 = _mm256_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vmultiplier0 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vmultiplier1 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
__m256i vy0 = _mm256_packs_epi16(vacc0, vacc1);
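    // _mm256_packs_epi16 packs within each 128-bit lane, leaving the 64-bit
    // chunks in [a.lo, b.lo, a.hi, b.hi] order; the permute below restores
    // linear order.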
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,334
| 38.054054
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-avx2-x64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__avx2_x64(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 64 * sizeof(int8_t); batch -= 64 * sizeof(int8_t)) {
__m256i vacc0 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
__m256i vacc2 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 32)));
__m256i vacc3 = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) (input + 48)));
input += 64;
__m256i vmultiplier0 = _mm256_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
__m256i vmultiplier1 = _mm256_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
__m256i vmultiplier2 = _mm256_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm256_sub_epi16(vinput_zero_point, vacc2);
__m256i vmultiplier3 = _mm256_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm256_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vmultiplier1 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vmultiplier2 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier2);
vacc2 = _mm256_slli_epi16(vacc2, 7);
vmultiplier3 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier3);
vacc3 = _mm256_slli_epi16(vacc3, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm256_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm256_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm256_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm256_adds_epi16(vacc3, voutput_zero_point);
__m256i vy0 = _mm256_packs_epi16(vacc0, vacc1);
__m256i vy1 = _mm256_packs_epi16(vacc2, vacc3);
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
vy1 = _mm256_permute4x64_epi64(vy1, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
output += 64;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
__m256i vacc = _mm256_cvtepi8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packs_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,446
| 41.554688
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__neon_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(¶ms->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(¶ms->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(¶ms->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
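    // The operands are negated (zero_point - x), so vacc < 0 corresponds to a
    // positive dequantized input; vbslq_s16 therefore routes those lanes to the
    // positive multiplier and the rest to the negative one.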
const uint16x8_t vmask0 = vcltq_s16(vacc0, vmovq_n_s16(0));
const uint16x8_t vmask1 = vcltq_s16(vacc1, vmovq_n_s16(0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
const int16x8_t vmultiplier0 = vbslq_s16(vmask0, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier1 = vbslq_s16(vmask1, vpositive_multiplier, vnegative_multiplier);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier0);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier1);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vacc0), vqmovn_s16(vacc1));
vst1q_s8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 3,495
| 35.8
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__neon_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(¶ms->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(¶ms->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(¶ms->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const int8x16_t vx0 = vld1q_s8(input); input += 16;
const int8x16_t vx1 = vld1q_s8(input); input += 16;
int16x8_t vacc0 = vsubw_s8(vinput_zero_point, vget_low_s8(vx0));
int16x8_t vacc1 = vsubw_s8(vinput_zero_point, vget_high_s8(vx0));
int16x8_t vacc2 = vsubw_s8(vinput_zero_point, vget_low_s8(vx1));
int16x8_t vacc3 = vsubw_s8(vinput_zero_point, vget_high_s8(vx1));
const uint16x8_t vmask0 = vcltq_s16(vacc0, vmovq_n_s16(0));
const uint16x8_t vmask1 = vcltq_s16(vacc1, vmovq_n_s16(0));
const uint16x8_t vmask2 = vcltq_s16(vacc2, vmovq_n_s16(0));
const uint16x8_t vmask3 = vcltq_s16(vacc3, vmovq_n_s16(0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
const int16x8_t vmultiplier0 = vbslq_s16(vmask0, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier1 = vbslq_s16(vmask1, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier2 = vbslq_s16(vmask2, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier3 = vbslq_s16(vmask3, vpositive_multiplier, vnegative_multiplier);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier0);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier1);
vacc2 = vqrdmulhq_s16(vacc2, vmultiplier2);
vacc3 = vqrdmulhq_s16(vacc3, vmultiplier3);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
vacc2 = vqaddq_s16(vacc2, voutput_zero_point);
vacc3 = vqaddq_s16(vacc3, voutput_zero_point);
const int8x16_t vy0 = vcombine_s8(vqmovn_s16(vacc0), vqmovn_s16(vacc1));
const int8x16_t vy1 = vcombine_s8(vqmovn_s16(vacc2), vqmovn_s16(vacc3));
vst1q_s8(output, vy0); output += 16;
vst1q_s8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 4,400
| 39.009091
| 97
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__neon_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int16x8_t vinput_zero_point = vld1q_dup_s16(¶ms->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(¶ms->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(¶ms->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t vx = vld1_s8(input); input += 8;
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const int8x8_t vy = vqmovn_s16(vacc);
vst1_s8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const int8x8_t vx = vld1_s8(input);
int16x8_t vacc = vsubw_s8(vinput_zero_point, vx);
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
int8x8_t vy = vqmovn_s16(vacc);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vy), 0); output += 4;
vy = vext_s8(vy, vy, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vy), 0); output += 2;
vy = vext_s8(vy, vy, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vy, 0);
}
}
}
| 2,506
| 34.814286
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-andxor-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_andxor_x1(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
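  // Branchless multiplier selection: math_asr_s32(vacc, 31) is all-ones for
  // negative vacc and zero otherwise, so (mask & multiplier_diff) ^
  // multiplier_base yields one multiplier per sign. The product is in Q8
  // fixed point; the bias presumably folds in the output zero point (and
  // rounding) scaled by 2**8 before the final arithmetic shift right by 8.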
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,339 | 29.454545 | 95 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-andxor-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_andxor_x2(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
input += 2;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
int32_t vmultiplier0 = math_asr_s32(vacc0, 31);
int32_t vmultiplier1 = math_asr_s32(vacc1, 31);
vmultiplier0 &= vmultiplier_diff;
vmultiplier1 &= vmultiplier_diff;
vmultiplier0 ^= vmultiplier_base;
vmultiplier1 ^= vmultiplier_base;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output = (int8_t) vout;
}
}
| 2,217 | 28.573333 | 95 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-andxor-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_andxor_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
int32_t vacc2 = (int32_t) input[2];
int32_t vacc3 = (int32_t) input[3];
input += 4;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
vacc2 -= vinput_zero_point;
vacc3 -= vinput_zero_point;
int32_t vmultiplier0 = math_asr_s32(vacc0, 31);
int32_t vmultiplier1 = math_asr_s32(vacc1, 31);
int32_t vmultiplier2 = math_asr_s32(vacc2, 31);
int32_t vmultiplier3 = math_asr_s32(vacc3, 31);
vmultiplier0 &= vmultiplier_diff;
vmultiplier1 &= vmultiplier_diff;
vmultiplier2 &= vmultiplier_diff;
vmultiplier3 &= vmultiplier_diff;
vmultiplier0 ^= vmultiplier_base;
vmultiplier1 ^= vmultiplier_base;
vmultiplier2 ^= vmultiplier_base;
vmultiplier3 ^= vmultiplier_base;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
vacc2 = vbias + vacc2 * vmultiplier2;
vacc3 = vbias + vacc3 * vmultiplier3;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout2 = math_max_s32(vout2, -128);
vout3 = math_max_s32(vout3, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
vout2 = math_min_s32(vout2, 127);
vout3 = math_min_s32(vout3, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,090 | 30.222222 | 97 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-select-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_select_x1(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
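  // Same Q8 scheme as the and-xor kernels above, but the multiplier is
  // chosen with a plain conditional; XNN_UNPREDICTABLE hints that the sign
  // is data-dependent, steering the compiler toward a branchless select.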
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,367 | 30.090909 | 107 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-select-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_select_x2(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
input += 2;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
const int32_t vmultiplier0 = XNN_UNPREDICTABLE(vacc0 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier1 = XNN_UNPREDICTABLE(vacc1 >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output = (int8_t) vout;
}
}
| 2,207 | 31 | 109 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-scalar-select-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__scalar_select_x4(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
int32_t vacc2 = (int32_t) input[2];
int32_t vacc3 = (int32_t) input[3];
input += 4;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
vacc2 -= vinput_zero_point;
vacc3 -= vinput_zero_point;
const int32_t vmultiplier0 = XNN_UNPREDICTABLE(vacc0 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier1 = XNN_UNPREDICTABLE(vacc1 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier2 = XNN_UNPREDICTABLE(vacc2 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier3 = XNN_UNPREDICTABLE(vacc3 >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
vacc2 = vbias + vacc2 * vmultiplier2;
vacc3 = vbias + vacc3 * vmultiplier3;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, -128);
vout1 = math_max_s32(vout1, -128);
vout2 = math_max_s32(vout2, -128);
vout3 = math_max_s32(vout3, -128);
vout0 = math_min_s32(vout0, 127);
vout1 = math_min_s32(vout1, 127);
vout2 = math_min_s32(vout2, 127);
vout3 = math_min_s32(vout3, 127);
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, -128);
vout = math_min_s32(vout, 127);
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,044 | 33.213483 | 109 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-sse2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vlrelu_ukernel__sse2_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
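  // SSE2 lacks PMULHRSW, so the rounding Q8 multiply is emulated: the 32-bit
  // product is split with PMULLO/PMULHI, the low half is shifted right by 7
  // and rounded via _mm_avg_epu16 against zero (which computes (a + 1) >> 1,
  // i.e. (lo + 0x80) >> 8 overall), then recombined with the high half
  // shifted left by 8.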
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
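    // Widen int8 -> int16 without SSE4.1: compare against zero to build a
    // sign-mask byte for each element, then interleave it behind the source
    // bytes.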
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc0, vacc1);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy0);
vy0 >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy0;
}
}
}
| 5,094 | 35.654676 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-sse2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vlrelu_ukernel__sse2_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
__m128i vextx0 = _mm_unpacklo_epi8(vx0, vm0);
__m128i vextx1 = _mm_unpackhi_epi8(vx0, vm0);
const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
__m128i vextx2 = _mm_unpacklo_epi8(vx1, vm1);
__m128i vextx3 = _mm_unpackhi_epi8(vx1, vm1);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vextx2, vinput_zero_point);
vextx2 = _mm_sub_epi16(vinput_zero_point, vextx2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point);
vextx3 = _mm_sub_epi16(vinput_zero_point, vextx3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
__m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier2);
__m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier3);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodlo2 = _mm_srli_epi16(vprodlo2, 7);
__m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier2);
vprodlo3 = _mm_srli_epi16(vprodlo3, 7);
__m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier3);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
vprodhi2 = _mm_slli_epi16(vprodhi2, 8);
vprodlo2 = _mm_avg_epu16(vprodlo2, vzero);
vprodhi3 = _mm_slli_epi16(vprodhi3, 8);
vprodlo3 = _mm_avg_epu16(vprodlo3, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
__m128i vacc2 = _mm_add_epi16(vprodlo2, vprodhi2);
__m128i vacc3 = _mm_add_epi16(vprodlo3, vprodhi3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vm);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc0, vacc1);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy0);
vy0 >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy0;
}
}
}
| 8,401 | 39.009524 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-sse41-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__sse41_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
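  // Sign-extend with PMOVSXBW, then the same Q8 scheme as the NEON kernel:
  // pre-shift by 2**7 and multiply with PMULHRSW (_mm_mulhrs_epi16 computes
  // (a*b + 0x4000) >> 15), yielding round((input_zero_point - x) *
  // multiplier / 2**8). The and/xor mask dance selects the multiplier
  // branchlessly, as in the scalar and-xor kernels.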
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,967 | 36.433962 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-sse41-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__sse41_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
__m128i vacc0 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vacc3 = _mm_slli_epi16(vacc3, 7);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,047 | 39.709677 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-sse41-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qs8_vlrelu_ukernel__sse41_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packs_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
__m128i vacc = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,785 | 34.717949 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-ssse3-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vlrelu_ukernel__ssse3_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
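  // Same PMULHRSW-based Q8 scheme as the SSE4.1 kernels, but inputs are
  // widened with the SSE2-style compare-and-unpack trick since SSSE3 lacks
  // PMOVSXBW.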
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 4,263 | 39.226415 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-ssse3-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_vlrelu_ukernel__ssse3_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
const __m128i vm0 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx0);
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vm0);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vm0);
const __m128i vm1 = _mm_cmpgt_epi8(_mm_setzero_si128(), vx1);
__m128i vacc2 = _mm_unpacklo_epi8(vx1, vm1);
__m128i vacc3 = _mm_unpackhi_epi8(vx1, vm1);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vacc3 = _mm_slli_epi16(vacc3, 7);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packs_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packs_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 15 * sizeof(int8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vm = _mm_cmpgt_epi8(_mm_setzero_si128(), vx);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vm);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vm);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packs_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) vy_lo;
}
}
}
| 6,650 | 41.363057 | 101 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmrelaxedsimd-arm-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
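  // WAsm relaxed-SIMD counterpart of the NEON scheme: an arithmetic shift
  // right by 15 turns each lane into an all-ones/all-zero sign mask, relaxed
  // laneselect picks the per-lane multiplier, and the relaxed Q15 rounding
  // multiply plays the role of VQRDMULH.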
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vx0 = wasm_v128_load(input);
input += 16;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,859 | 37.6 | 117 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmrelaxedsimd-arm-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
input += 32;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
v128_t vacc2 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx1));
v128_t vacc3 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx1));
v128_t vmultiplier2 = wasm_i16x8_shr(vacc2, 15);
v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier2);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier3);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier2);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,866 | 41.321739 | 117 | c |

| XNNPACK | XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmrelaxedsimd-x86-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
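  // WAsm relaxed-SIMD counterpart of the SSE4.1 scheme: the and/xor mask
  // dance selects the multiplier and the relaxed Q15 rounding multiply
  // stands in for PMULHRSW.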
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
input += 16;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,920
| 36.342857
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmrelaxedsimd-x86-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
v128_t vacc2 = wasm_i16x8_load8x8(input + 16);
v128_t vacc3 = wasm_i16x8_load8x8(input + 24);
input += 32;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
v128_t vmultiplier2 = wasm_i16x8_gt(vacc2, vinput_zero_point);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vmultiplier2 = wasm_v128_and(vmultiplier2, vmultiplier_diff);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_xor(vmultiplier2, vmultiplier_base);
vmultiplier3 = wasm_v128_and(vmultiplier3, vmultiplier_diff);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_xor(vmultiplier3, vmultiplier_base);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier2);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,971
| 39.422764
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmrelaxedsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmrelaxedsimd_x86_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,765
| 34.922078
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmsimd_arm_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
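  // ARM-tuned multiplier selection: vacc = input_zero_point - x, so an
  // arithmetic shift right by 15 replicates the sign bit into a full 16-bit
  // mask (all-ones exactly when x is above the zero point), and
  // wasm_v128_bitselect then picks the positive- or negative-slope Q15
  // multiplier per lane.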
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vx0 = wasm_v128_load(input);
input += 16;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,708
| 36.09
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmsimd-arm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmsimd_arm_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
input += 32;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
v128_t vacc2 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_low_i8x16(vx1));
v128_t vacc3 = wasm_i16x8_sub(vinput_zero_point, wasm_i16x8_extend_high_i8x16(vx1));
v128_t vmultiplier2 = wasm_i16x8_shr(vacc2, 15);
v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier2);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier3);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier2);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
const v128_t vx = wasm_i16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,643
| 39.382609
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x16(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
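  // Unlike the relaxed-SIMD builtin used in the wasmrelaxedsimd variants,
  // wasm_i16x8_q15mulr_sat has fully specified semantics: a saturating,
  // rounding Q15 multiply, ((a * b + 0x4000) >> 15) with saturation to
  // INT16_MAX.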
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
input += 16;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,849
| 35.666667
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmsimd-x86-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x32(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 32 * sizeof(int8_t); batch -= 32 * sizeof(int8_t)) {
v128_t vacc0 = wasm_i16x8_load8x8(input);
v128_t vacc1 = wasm_i16x8_load8x8(input + 8);
v128_t vacc2 = wasm_i16x8_load8x8(input + 16);
v128_t vacc3 = wasm_i16x8_load8x8(input + 24);
input += 32;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
v128_t vmultiplier2 = wasm_i16x8_gt(vacc2, vinput_zero_point);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vmultiplier2 = wasm_v128_and(vmultiplier2, vmultiplier_diff);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_xor(vmultiplier2, vmultiplier_base);
vmultiplier3 = wasm_v128_and(vmultiplier3, vmultiplier_diff);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_xor(vmultiplier3, vmultiplier_base);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier2);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_i8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_i8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,868
| 38.585366
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vlrelu/gen/qs8-vlrelu-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qs8_vlrelu_ukernel__wasmsimd_x86_x8(
size_t batch,
const int8_t* input,
int8_t* output,
const union xnn_qs8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(int8_t));
assert(batch <= 7 * sizeof(int8_t));
v128_t vacc = wasm_i16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_i8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,726
| 34.415584
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-avx-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
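  // _mm_mullo_epi16/_mm_mulhi_epi16 produce the low and high 16-bit halves of
  // each signed 16x16 product; interleaving them with unpacklo/unpackhi
  // reassembles the full 32-bit products in lane order. Requantization then
  // goes through fp32: _mm_cvtps_epi32 rounds to nearest-even under the
  // default MXCSR rounding mode.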
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
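      // The x16 kernel can leave a tail of up to 15 elements, so the
      // remainder loop first drains any full 8-element group, then scatters
      // the final 1-7 bytes with 4/2/1-byte unaligned stores.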
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,235
| 42.006897
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,830
| 38.92562
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->fp32_neon.a_zero_point);
const int8x16_t vb_zero_point = vld1q_dup_s8(params->fp32_neon.b_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
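  // Magic-bias requantization: adding a large power-of-two float bias forces
  // the rounded integer result into the low mantissa bits, so reinterpreting
  // the float as int32 and saturating-subtracting the precomputed
  // (magic_bias_bits - output_zero_point) yields the rounded, zero-point
  // adjusted value without needing the ARMv8 round-to-nearest convert
  // instruction.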
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vb0123456789ABCDEF = vld1q_s8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vget_low_s8(vb_zero_point));
const int16x8_t vxb89ABCDEF = vsubl_high_s8(vb0123456789ABCDEF, vb_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vget_high_s8(vb0123456789ABCDEF), vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
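    // On AArch64, vqmovn_high_s32 fuses the saturating narrow with the
    // combine into the upper half of the destination; the generic fallback
    // uses vcombine_s16 of two vqmovn_s32 results.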
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vget_low_s8(vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,249
| 43.207317
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,316
| 41.395973
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int8x8_t vb01234567 = vld1_s8(input_b);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,775
| 36.606299
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neonv8-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->fp32_neonv8.a_zero_point);
const int8x16_t vb_zero_point = vld1q_dup_s8(params->fp32_neonv8.b_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
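  // ARMv8 path: vcvtnq_s32_f32 converts with round-to-nearest-even directly,
  // replacing the magic-bias trick used in the plain NEON kernels; the
  // xnnpack/intrinsics-polyfill.h header appears to supply the intrinsic for
  // toolchains that lack it.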
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vb0123456789ABCDEF = vld1q_s8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vget_low_s8(vb_zero_point));
const int16x8_t vxb89ABCDEF = vsubl_high_s8(vb0123456789ABCDEF, vb_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vget_high_s8(vb0123456789ABCDEF), vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
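    // Convert back to int32 with round-to-nearest-even (ARMv8 FCVTNS).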
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vget_low_s8(vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,763 | 41.540881 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neonv8-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
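    // Saturating narrow int32 -> int16 (SQXTN/SQXTN2 on AArch64).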
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,826 | 39.465278 | 103 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-neonv8-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__neonv8_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
  const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
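  // Main loop processes 8 elements per iteration; a remainder of 1-7 elements is handled below with partial stores.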
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int8x8_t vb01234567 = vld1_s8(input_b);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,428 | 35.00813 | 93 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
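  // Requantize via the "magic bias" trick: clamp in the float domain, add a large constant so the
  // rounded result lands in the low mantissa bits, then subtract (bias - output zero point) as an integer.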
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,794 | 33.519231 | 107 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x2(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
input_a += 2;
const int32_t vb0 = input_b[0] - vb_zero_point;
const int32_t vb1 = input_b[1] - vb_zero_point;
input_b += 2;
const int32_t vacc0 = va0 * vb0;
const int32_t vacc1 = va1 * vb1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
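    // batch is odd here, so exactly one element remains.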
const int32_t va = (int32_t) *input_a - va_zero_point;
const int32_t vb = (int32_t) *input_b - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (int8_t) vout;
}
}
| 2,849 | 34.185185 | 107 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__scalar_x4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
const int32_t va2 = input_a[2] - va_zero_point;
const int32_t va3 = input_a[3] - va_zero_point;
input_a += 4;
const int32_t vb0 = input_b[0] - vb_zero_point;
const int32_t vb1 = input_b[1] - vb_zero_point;
const int32_t vb2 = input_b[2] - vb_zero_point;
const int32_t vb3 = input_b[3] - vb_zero_point;
input_b += 4;
const int32_t vacc0 = va0 * vb0;
const int32_t vacc1 = va1 * vb1;
const int32_t vacc2 = va2 * vb2;
const int32_t vacc3 = va3 * vb3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
const int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
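    // Handle the remaining 1-3 elements one at a time.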
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,892 | 36.796117 | 107 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-sse2-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
input_a += 16;
input_b += 16;
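    // Sign-extend int8 -> int16 without SSE4.1: duplicate each byte into both halves of a
    // 16-bit lane, then arithmetic-shift right by 8.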
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
vb89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vb89ABCDEF, vb89ABCDEF), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
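    // Interleave the low and high 16-bit halves of each product to form full 32-bit products.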
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
          *output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,622 | 42.287582 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-sse2-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
vb01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vb01234567, vb01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 4,986 | 38.896 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-sse41-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
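  // SSE4.1 provides direct sign extension (PMOVSXBW), avoiding the SSE2 unpack+shift sequence.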
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,237 | 42.02069 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-sse41-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,832 | 38.942149 | 106 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-wasmsimd-mul32-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_i16x8_load8x8(input_b + 8);
input_a += 16;
input_b += 16;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
const v128_t vxa89ABCDEF = wasm_i16x8_sub(va89ABCDEF, va_zero_point);
const v128_t vxb89ABCDEF = wasm_i16x8_sub(vb89ABCDEF, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
v128_t vacc89AB = wasm_i32x4_extmul_low_i16x8(vxa89ABCDEF, vxb89ABCDEF);
v128_t vaccCDEF = wasm_i32x4_extmul_high_i16x8(vxa89ABCDEF, vxb89ABCDEF);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
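    // Magic-bias requantization: the float add rounds to nearest; the integer max against
    // vmagic_min applies the output_min clamp while the value is still biased.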
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,055 | 40.479452 | 132 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-fp32-wasmsimd-mul32-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
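      // 1-7 elements remain: the 8-byte load may over-read (permitted by XNN_OOB_READS);
      // the lane stores below write only the 4/2/1 valid bytes.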
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_i16x8_load8x8(input_b);
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 4,684 | 38.041667 | 132 | c | XNNPACK | XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-rndnu-neon-ld128-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->rndnu_neon.a_zero_point);
const int8x16_t vb_zero_point = vld1q_dup_s8(params->rndnu_neon.b_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
#endif
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
  const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
const int8x16_t vb0123456789ABCDEF = vld1q_s8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vget_low_s8(vb_zero_point));
const int16x8_t vxb89ABCDEF = vsubl_high_s8(vb0123456789ABCDEF, vb_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vget_low_s8(vb0123456789ABCDEF), vb_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vget_high_s8(vb0123456789ABCDEF), vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
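    // rndnu requantization: saturating pre-shift, doubling high-half multiply (SQDMULH),
    // then a rounding post-shift.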
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vget_low_s8(vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
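// A minimal usage sketch (an illustration, not part of the generated file):
// the params struct is assumed to be initialized elsewhere, e.g. through
// XNNPACK's internal init helpers for the rndnu NEON variant.
//
//   int8_t a[32], b[32], y[32];
//   union xnn_qs8_mul_minmax_params params;  // assumed initialized
//   xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld128_x16(
//       32 * sizeof(int8_t), a, b, y, &params);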
| 6,993
| 42.7125
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-rndnu-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
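  // Main loop: load two 8-byte vectors per iteration (ld64), widen past the
  // zero points, multiply into 32-bit accumulators, then requantize with the
  // rndnu scheme (saturating pre-shift, doubling multiply-high, rounding
  // post-shift) before narrowing, rebiasing, and clamping.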
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int8x8_t vb89ABCDEF = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
const int16x8_t vxb89ABCDEF = vsubl_s8(vb89ABCDEF, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,058
| 40.786207
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmul/gen/qs8-vmul-minmax-rndnu-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qs8_vmul_minmax_rndnu_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
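  // Main loop: 8 elements per iteration. The rndnu requantization applies a
  // saturating pre-shift, a saturating doubling multiply-high, and a rounding
  // post-shift to the 32-bit products before narrowing, adding the output
  // zero point with saturation, and clamping.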
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t vb01234567 = vld1_s8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int8x8_t vb01234567 = vld1_s8(input_b);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxb01234567 = vsubl_s8(vb01234567, vb_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,628
| 36.330645
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-avx-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point));
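  // Main loop: sign-extend 16 int8 inputs to 16 bits, multiply each by the
  // broadcast (b - b_zero_point) value via mullo/mulhi and reassemble the
  // 32-bit products with unpacks, scale in float, round back to int32 with
  // _mm_cvtps_epi32, then pack to int8 with saturation and clamp.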
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,753
| 40.395683
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point));
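  // Main loop: 8 elements per iteration. Each sign-extended input is multiplied
  // by the broadcast (b - b_zero_point) value; the mullo/mulhi halves are
  // unpacked into 32-bit products, scaled in float, rounded with
  // _mm_cvtps_epi32, and packed back to int8 with saturation and clamping.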
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,558
| 37.635593
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->fp32_neon.a_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
#endif
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
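  // Main loop: 16 elements per iteration. The 32-bit products are scaled in
  // float, then requantized with the magic-bias trick: adding the bias rounds
  // the value into the low bits of the float, and a saturating subtract of
  // magic_bias_less_output_zero_point recovers the rebiased integer result.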
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,530
| 40.598726
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
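  // Main loop: two 8-byte loads per iteration (ld64). The 32-bit products are
  // scaled in float and requantized with the magic-bias trick: the bias add
  // rounds the value into the float's low bits, and a saturating subtract of
  // magic_bias_less_output_zero_point recovers the rebiased integer result.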
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,958
| 40.096552
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neon.a_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
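  // Main loop: 8 elements per iteration, requantized with the magic-bias
  // trick: scale the 32-bit products in float, add the magic bias to round,
  // then saturating-subtract magic_bias_less_output_zero_point to obtain the
  // rebiased integer result before narrowing and clamping.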
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,582
| 35.664
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neonv8-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->fp32_neonv8.a_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
#endif
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
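  // Main loop: 16 elements per iteration. The 32-bit products are scaled in
  // float and rounded back to int32 with the ARMv8 vcvtnq_s32_f32
  // (round-to-nearest-even) instruction, then rebiased by the output zero
  // point with saturation, narrowed, and clamped.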
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,042
| 38.756579
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neonv8-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->fp32_neonv8.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
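  // Main loop: two 8-byte loads per iteration (ld64). Products are scaled in
  // float, rounded with the ARMv8 vcvtnq_s32_f32 (round-to-nearest-even)
  // instruction, rebiased by the output zero point with saturation, narrowed,
  // and clamped.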
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,468
| 38.064286
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-neonv8-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->fp32_neonv8.a_zero_point);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
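  // Main loop: 8 elements per iteration. Products are scaled in float, rounded
  // with vcvtnq_s32_f32 (round-to-nearest-even), rebiased by the output zero
  // point with saturation, narrowed, and clamped.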
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,235
| 34.008264
| 93
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x1(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
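  // Per-element loop: multiply the zero-point-adjusted inputs, scale in float,
  // clamp against the precomputed (min/max - output_zero_point) bounds, then
  // round and rebias with the magic-bias trick.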
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
| 1,745
| 33.235294
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/vmul.h>


void xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x2(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
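  // Main loop: process 2 elements per iteration; each product is scaled in
  // float, clamped against the (min/max - output_zero_point) bounds, and then
  // rounded and rebiased via the magic-bias trick.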
for (; batch >= 2 * sizeof(int8_t); batch -= 2 * sizeof(int8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
input_a += 2;
const int32_t vacc0 = va0 * vb;
const int32_t vacc1 = va1 * vb;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = (int32_t) *input_a - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (int8_t) vout;
}
}
| 2,677 | 34.236842 | 107 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__scalar_x4(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
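  // Main loop is unrolled by 4; any 1-3 leftover elements fall through to the
  // scalar remainder loop below.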
for (; batch >= 4 * sizeof(int8_t); batch -= 4 * sizeof(int8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
const int32_t va2 = input_a[2] - va_zero_point;
const int32_t va3 = input_a[3] - va_zero_point;
input_a += 4;
const int32_t vacc0 = va0 * vb;
const int32_t vacc1 = va1 * vb;
const int32_t vacc2 = va2 * vb;
const int32_t vacc3 = va3 * vb;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
const int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (int8_t) vout0;
output[1] = (int8_t) vout1;
output[2] = (int8_t) vout2;
output[3] = (int8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (int8_t) vout;
batch -= sizeof(int8_t);
} while (batch != 0);
}
}
| 3,610 | 36.614583 | 107 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-sse2-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
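  // Broadcast the scalar operand *input_b into all eight 16-bit lanes:
  // multiplying by 0x00010001 duplicates the 16-bit value within a 32-bit
  // word, and _mm_shuffle_epi32(..., 0) replicates that word across the
  // register before the zero point is subtracted.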
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
input_a += 16;
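    // SSE2 has no sign-extending byte load, so each int8 is widened to int16
    // by interleaving the byte with itself and arithmetic-shifting right by 8.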
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
va89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(va89ABCDEF, va89ABCDEF), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
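    // Interleaving the low and high 16-bit halves of each product reassembles
    // the full 32-bit int16 x int16 results in lane order.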
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout89ABCDEF = _mm_max_epi16(vout89ABCDEF, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
vout89ABCDEF = _mm_min_epi16(vout89ABCDEF, voutput_max);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,976 | 40.506944 | 109 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-sse2-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
va01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(va01234567, va01234567), 8);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
vout01234567 = _mm_max_epi16(vout01234567, voutput_min);
vout01234567 = _mm_min_epi16(vout01234567, voutput_max);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 4,604 | 37.375 | 109 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-sse41-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point));
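  // Unlike the SSE2 variant, SSE4.1 sign-extends bytes directly with
  // _mm_cvtepi8_epi16 (pmovsxbw) and clamps after packing to int8 using the
  // byte-wise _mm_max_epi8/_mm_min_epi8.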
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packs_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epi8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epi8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,755 | 40.410072 | 109 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-sse41-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse4.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse4.b_zero_point));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packs_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epi8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epi8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
*output = (int8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,560 | 37.652542 | 109 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-wasmsimd-mul32-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
const v128_t vxb = wasm_i16x8_sub(
wasm_i16x8_splat((int16_t) *input_b), wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point));
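  // i32x4.extmul widens and multiplies in a single step. Rounding uses the
  // magic-bias trick; wasm_i32x4_max against vmagic_min clamps to the output
  // minimum while values are still in magic representation, and the upper
  // bound is applied after narrowing with wasm_i8x16_min.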
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_i16x8_load8x8(input_a + 8);
input_a += 16;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxa89ABCDEF = wasm_i16x8_sub(va89ABCDEF, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
v128_t vacc89AB = wasm_i32x4_extmul_low_i16x8(vxa89ABCDEF, vxb);
v128_t vaccCDEF = wasm_i32x4_extmul_high_i16x8(vxa89ABCDEF, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_i8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_i8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,612 | 39.381295 | 132 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-fp32-wasmsimd-mul32-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
const v128_t vxb = wasm_i16x8_sub(
wasm_i16x8_splat((int16_t) *input_b), wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point));
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
input_a += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_i16x8_load8x8(input_a);
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_i8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_i8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(int8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(int8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 4,415 | 37.068966 | 132 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-rndnu-neon-ld128-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld128_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const int8x16_t va_zero_point = vld1q_dup_s8(params->rndnu_neon.a_zero_point);
#else
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
#endif
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
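  // rndnu requantization: a saturating pre-shift (vqshlq_s32) scales the
  // product into range, vqdmulhq_s32 applies the fixed-point multiplier
  // (doubling, saturating, high half), and a rounding post-shift (vrshlq_s32)
  // produces the requantized value before the output zero point is added.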
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x16_t va0123456789ABCDEF = vld1q_s8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), vget_low_s8(va_zero_point));
const int16x8_t vxa89ABCDEF = vsubl_high_s8(va0123456789ABCDEF, va_zero_point);
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(vget_low_s8(va0123456789ABCDEF), va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(vget_high_s8(va0123456789ABCDEF), va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, vget_low_s8(va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,273 | 40.006536 | 106 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-rndnu-neon-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld64_x16(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x16_t voutput_min = vld1q_dup_s8(¶ms->rndnu_neon.output_min);
const int8x16_t voutput_max = vld1q_dup_s8(¶ms->rndnu_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
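  // The ld64 variant loads the 16 inputs as two 8-byte halves, so widening
  // uses plain vsubl_s8 on each half and avoids the vget_low/vsubl_high
  // cross-half handling the ld128 variant needs.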
for (; batch >= 16 * sizeof(int8_t); batch -= 16 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int8x8_t va89ABCDEF = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
const int16x8_t vxa89ABCDEF = vsubl_s8(va89ABCDEF, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
int8x16_t vout0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc01234567), vacc89ABCDEF);
#else
int8x16_t vout0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc01234567), vqmovn_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_s8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_s8(vout0123456789ABCDEF, voutput_max);
vst1q_s8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, vget_low_s8(voutput_min));
vout01234567 = vmin_s8(vout01234567, vget_low_s8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(int8_t))) {
vst1_s8(output, vout01234567); output += 8;
batch -= 8 * sizeof(int8_t);
} else {
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,700 | 39.432624 | 103 | c |
| XNNPACK | XNNPACK-master/src/qs8-vmulc/gen/qs8-vmulc-minmax-rndnu-neon-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qs8_vmulc_minmax_rndnu_ukernel__neon_ld64_x8(
size_t batch,
const int8_t* input_a,
const int8_t* input_b,
int8_t* output,
const union xnn_qs8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(int8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int8x8_t va_zero_point = vld1_dup_s8(params->rndnu_neon.a_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
const int8x8_t voutput_min = vld1_dup_s8(¶ms->rndnu_neon.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->rndnu_neon.output_max);
const int8x8_t vb = vld1_dup_s8(input_b);
const int8x8_t vb_zero_point = vld1_dup_s8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vsubl_s8(vb, vb_zero_point);
for (; batch >= 8 * sizeof(int8_t); batch -= 8 * sizeof(int8_t)) {
const int8x8_t va01234567 = vld1_s8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
vst1_s8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const int8x8_t va01234567 = vld1_s8(input_a);
const int16x8_t vxa01234567 = vsubl_s8(va01234567, va_zero_point);
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
int8x8_t vout01234567 = vqmovn_s16(vacc01234567);
vout01234567 = vmax_s8(vout01234567, voutput_min);
vout01234567 = vmin_s8(vout01234567, voutput_max);
if (batch & (4 * sizeof(int8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s8(vout01234567), 0); output += 4;
vout01234567 = vext_s8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(int8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_s8(vout01234567), 0); output += 2;
vout01234567 = vext_s8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(int8_t))) {
vst1_lane_s8(output, vout01234567, 0);
}
}
}
}
| 4,435 | 35.360656 | 93 | c |
| XNNPACK | XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9p8x-minmax-fp32-neon-c8.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/common.h>
void xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__neon_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const int32x4_t vinit_bias = vld1q_dup_s32(¶ms->fp32_neon.init_bias);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neon.scale);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->fp32_neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->fp32_neon.output_max);
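  // Multipass pooling: the first pass consumes 9 inputs and writes int32
  // partial sums (seeded with vinit_bias) to the scratch buffer, each middle
  // pass accumulates 8 more, and the final pass (up to 8 inputs, padded with
  // the zero vector) adds the last contributions and requantizes with the
  // fp32 magic-bias path.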
do {
{
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
int32_t* b = buffer;
for (size_t c = 0; c < channels; c += 8) {
const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
const uint8x8_t vi8 = vld1_u8(i8); i8 += 8;
const uint16x8_t vsum018 = vaddw_u8(vaddl_u8(vi0, vi1), vi8);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum2345 = vaddq_u16(vsum23, vsum45);
const uint16x8_t vsum01678 = vaddq_u16(vsum018, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum2345, vsum01678);
const int32x4_t vacc_lo = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_low_u16(vsum)));
const int32x4_t vacc_hi = vaddw_s16(vinit_bias, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(b, vacc_lo); b += 4;
vst1q_s32(b, vacc_hi); b += 4;
}
}
size_t k = kernel_elements;
for (k -= 9; k > 8; k -= 8) {
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
int32_t* b = buffer;
for (size_t c = 0; c < channels; c += 8) {
const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
int32x4_t vacc_lo = vld1q_s32(b);
int32x4_t vacc_hi = vld1q_s32(b + 4);
const uint16x8_t vsum01 = vaddl_u8(vi0, vi1);
const uint16x8_t vsum23 = vaddl_u8(vi2, vi3);
const uint16x8_t vsum45 = vaddl_u8(vi4, vi5);
const uint16x8_t vsum67 = vaddl_u8(vi6, vi7);
const uint16x8_t vsum0123 = vaddq_u16(vsum01, vsum23);
const uint16x8_t vsum4567 = vaddq_u16(vsum45, vsum67);
const uint16x8_t vsum = vaddq_u16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vreinterpret_s16_u16(vget_low_u16(vsum)));
vacc_hi = vaddw_s16(vacc_hi, vreinterpret_s16_u16(vget_high_u16(vsum)));
vst1q_s32(b, vacc_lo); b += 4;
vst1q_s32(b, vacc_hi); b += 4;
}
}
{
const uint8_t* i0 = input[0];
assert(i0 != NULL);
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
input = (const uint8_t**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
int32_t* b = buffer;
while (c >= 8) {
const uint8x8_t vi0 = vld1_u8(i0); i0 += 8;
const uint8x8_t vi1 = vld1_u8(i1); i1 += 8;
const uint8x8_t vi2 = vld1_u8(i2); i2 += 8;
const uint8x8_t vi3 = vld1_u8(i3); i3 += 8;
const uint8x8_t vi4 = vld1_u8(i4); i4 += 8;
const uint8x8_t vi5 = vld1_u8(i5); i5 += 8;
const uint8x8_t vi6 = vld1_u8(i6); i6 += 8;
const uint8x8_t vi7 = vld1_u8(i7); i7 += 8;
int32x4_t vacc_lo = vld1q_s32(b); b += 4;
int32x4_t vacc_hi = vld1q_s32(b); b += 4;
const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));
const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));
float32x4_t vfpacc_lo = vcvtq_f32_s32(vacc_lo);
float32x4_t vfpacc_hi = vcvtq_f32_s32(vacc_hi);
vfpacc_lo = vmulq_f32(vfpacc_lo, vscale);
vfpacc_hi = vmulq_f32(vfpacc_hi, vscale);
vacc_lo = vreinterpretq_s32_f32(vaddq_f32(vfpacc_lo, vmagic_bias));
vacc_hi = vreinterpretq_s32_f32(vaddq_f32(vfpacc_hi, vmagic_bias));
vacc_lo = vqsubq_s32(vacc_lo, vmagic_bias_less_output_zero_point);
vacc_hi = vqsubq_s32(vacc_hi, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc = vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi);
#else
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
#endif
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
vst1_u8(output, vout); output += 8;
c -= 8;
}
if (c != 0) {
const uint8x8_t vi0 = vld1_u8(i0);
const uint8x8_t vi1 = vld1_u8(i1);
const uint8x8_t vi2 = vld1_u8(i2);
const uint8x8_t vi3 = vld1_u8(i3);
const uint8x8_t vi4 = vld1_u8(i4);
const uint8x8_t vi5 = vld1_u8(i5);
const uint8x8_t vi6 = vld1_u8(i6);
const uint8x8_t vi7 = vld1_u8(i7);
int32x4_t vacc_lo = vld1q_s32(b); b += 4;
int32x4_t vacc_hi = vld1q_s32(b);
const int16x8_t vsum01 = vreinterpretq_s16_u16(vaddl_u8(vi0, vi1));
const int16x8_t vsum23 = vreinterpretq_s16_u16(vaddl_u8(vi2, vi3));
const int16x8_t vsum45 = vreinterpretq_s16_u16(vaddl_u8(vi4, vi5));
const int16x8_t vsum67 = vreinterpretq_s16_u16(vaddl_u8(vi6, vi7));
const int16x8_t vsum0123 = vaddq_s16(vsum01, vsum23);
const int16x8_t vsum4567 = vaddq_s16(vsum45, vsum67);
const int16x8_t vsum = vaddq_s16(vsum0123, vsum4567);
vacc_lo = vaddw_s16(vacc_lo, vget_low_s16(vsum));
vacc_hi = vaddw_s16(vacc_hi, vget_high_s16(vsum));
float32x4_t vfpacc_lo = vcvtq_f32_s32(vacc_lo);
float32x4_t vfpacc_hi = vcvtq_f32_s32(vacc_hi);
vfpacc_lo = vmulq_f32(vfpacc_lo, vscale);
vfpacc_hi = vmulq_f32(vfpacc_hi, vscale);
vacc_lo = vreinterpretq_s32_f32(vaddq_f32(vfpacc_lo, vmagic_bias));
vacc_hi = vreinterpretq_s32_f32(vaddq_f32(vfpacc_hi, vmagic_bias));
vacc_lo = vqsubq_s32(vacc_lo, vmagic_bias_less_output_zero_point);
vacc_hi = vqsubq_s32(vacc_hi, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc = vqmovn_high_s32(vqmovn_s32(vacc_lo), vacc_hi);
#else
int16x8_t vacc = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
#endif
uint8x8_t vout = vqmovun_s16(vacc);
vout = vmax_u8(vout, voutput_min);
vout = vmin_u8(vout, voutput_max);
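        // Store the final 1-7 channels in 4-, 2-, and 1-byte chunks.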
if (c & 4) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout), 0); output += 4;
vout = vext_u8(vout, vout, 4);
}
if (c & 2) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout), 0); output += 2;
vout = vext_u8(vout, vout, 2);
}
if (c & 1) {
vst1_lane_u8(output, vout, 0); output += 1;
}
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
file_length: 13,263 | avg_line_length: 34.945799 | max_line_length: 123 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9p8x-minmax-fp32-scalar-imagic-c1.c

// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/math.h>
void xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__scalar_imagic_c1(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const int32_t vinit_bias = params->fp32_scalar_imagic.init_bias;
const float vscale = params->fp32_scalar_imagic.scale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
do {
// First pass.
{
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = vinit_bias;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
vacc += vi0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
vacc += vi1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
vacc += vi2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
vacc += vi3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
vacc += vi4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
vacc += vi5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
vacc += vi6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
vacc += vi7;
const int32_t vi8 = (int32_t) (uint32_t) *i8++;
vacc += vi8;
*b++ = vacc;
} while (--c != 0);
}
size_t k = kernel_elements;
// Intermediate passes.
for (k -= 9; k > 8; k -= 8) {
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
int32_t* b = buffer;
size_t c = channels;
do {
int32_t vacc = *b;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
vacc += vi0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
vacc += vi1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
vacc += vi2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
vacc += vi3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
vacc += vi4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
vacc += vi5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
vacc += vi6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
vacc += vi7;
*b++ = vacc;
} while (--c != 0);
}
// Last pass.
{
const uint8_t* i0 = input[0];
assert(i0 != NULL);
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
input = (const uint8_t**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
int32_t* b = buffer;
do {
int32_t vacc = *b++;
const int32_t vi0 = (int32_t) (uint32_t) *i0++;
vacc += vi0;
const int32_t vi1 = (int32_t) (uint32_t) *i1++;
vacc += vi1;
const int32_t vi2 = (int32_t) (uint32_t) *i2++;
vacc += vi2;
const int32_t vi3 = (int32_t) (uint32_t) *i3++;
vacc += vi3;
const int32_t vi4 = (int32_t) (uint32_t) *i4++;
vacc += vi4;
const int32_t vi5 = (int32_t) (uint32_t) *i5++;
vacc += vi5;
const int32_t vi6 = (int32_t) (uint32_t) *i6++;
vacc += vi6;
const int32_t vi7 = (int32_t) (uint32_t) *i7++;
vacc += vi7;
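        // Scale in floating point and add the magic bias; the low bits of the float now
        // hold the rounded integer, which is clamped and shifted by the zero point below.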
float vfpacc = (float) vacc * vscale;
vfpacc += vmagic_bias;
int32_t vout = (int32_t) float_as_uint32(vfpacc);
vout = math_max_s32(vout, vmagic_min);
vout = math_min_s32(vout, vmagic_max);
vout -= vmagic_bias_less_zero_point;
*output++ = (uint8_t) vout;
} while (--c != 0);
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
file_length: 8,905 | avg_line_length: 30.249123 | max_line_length: 100 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-avgpool/qu8-avgpool-9p8x-minmax-fp32-sse2-c8.c

// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/avgpool.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_avgpool_minmax_fp32_ukernel_9p8x__sse2_c8(
size_t output_pixels,
size_t kernel_elements,
size_t channels,
const uint8_t** input,
size_t input_offset,
const uint8_t* zero,
int32_t* buffer,
uint8_t* output,
size_t input_increment,
size_t output_increment,
const union xnn_qu8_avgpool_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(output_pixels != 0);
assert(kernel_elements > 9);
assert(channels != 0);
const __m128i vzero = _mm_setzero_si128();
const __m128i vinit_bias = _mm_load_si128((const __m128i*) params->fp32_sse2.init_bias);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
do {
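    // First pass.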
{
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
const uint8_t* i8 = *input++;
assert(i8 != NULL);
if XNN_UNPREDICTABLE(i8 != zero) {
i8 = (const uint8_t*) ((uintptr_t) i8 + input_offset);
}
int32_t* b = buffer;
for (size_t c = 0; c < channels; c += 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
const __m128i vi8 = _mm_loadl_epi64((const __m128i*) i8); i8 += 8;
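        // Zero-extend the 8-bit inputs to 16 bits before summing.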
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vxi8 = _mm_unpacklo_epi8(vi8, vzero);
const __m128i vsum018 = _mm_add_epi16(_mm_add_epi16(vxi0, vxi1), vxi8);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum2345 = _mm_add_epi16(vsum23, vsum45);
const __m128i vsum01678 = _mm_add_epi16(vsum018, vsum67);
const __m128i vsum = _mm_add_epi16(vsum2345, vsum01678);
const __m128i vacc_lo = _mm_add_epi32(vinit_bias, _mm_unpacklo_epi16(vsum, vzero));
const __m128i vacc_hi = _mm_add_epi32(vinit_bias, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*) b, vacc_lo);
_mm_store_si128((__m128i*) b + 1, vacc_hi);
b += 8;
}
}
size_t k = kernel_elements;
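    // Intermediate passes.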
for (k -= 9; k > 8; k -= 8) {
const uint8_t* i0 = *input++;
assert(i0 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
const uint8_t* i1 = *input++;
assert(i1 != NULL);
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
const uint8_t* i2 = *input++;
assert(i2 != NULL);
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
const uint8_t* i3 = *input++;
assert(i3 != NULL);
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
const uint8_t* i4 = *input++;
assert(i4 != NULL);
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
const uint8_t* i5 = *input++;
assert(i5 != NULL);
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
const uint8_t* i6 = *input++;
assert(i6 != NULL);
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
const uint8_t* i7 = *input++;
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
int32_t* b = buffer;
for (size_t c = 0; c < channels; c += 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*) b);
__m128i vacc_hi = _mm_load_si128((const __m128i*) b + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
_mm_store_si128((__m128i*) b, vacc_lo);
_mm_store_si128((__m128i*) b + 1, vacc_hi);
b += 8;
}
}
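    // Last pass.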
{
const uint8_t* i0 = input[0];
assert(i0 != NULL);
const uint8_t* i1 = input[1];
const uint8_t* i2 = input[2];
const uint8_t* i3 = input[3];
const uint8_t* i4 = input[4];
const uint8_t* i5 = input[5];
const uint8_t* i6 = input[6];
const uint8_t* i7 = input[7];
input = (const uint8_t**) ((uintptr_t) input + input_increment);
if (k < 2) {
i1 = zero;
}
assert(i1 != NULL);
if (k <= 2) {
i2 = zero;
}
assert(i2 != NULL);
if (k < 4) {
i3 = zero;
}
assert(i3 != NULL);
if (k <= 4) {
i4 = zero;
}
assert(i4 != NULL);
if (k < 6) {
i5 = zero;
}
assert(i5 != NULL);
if (k <= 6) {
i6 = zero;
}
assert(i6 != NULL);
if (k < 8) {
i7 = zero;
}
assert(i7 != NULL);
if XNN_UNPREDICTABLE(i0 != zero) {
i0 = (const uint8_t*) ((uintptr_t) i0 + input_offset);
}
if XNN_UNPREDICTABLE(i1 != zero) {
i1 = (const uint8_t*) ((uintptr_t) i1 + input_offset);
}
if XNN_UNPREDICTABLE(i2 != zero) {
i2 = (const uint8_t*) ((uintptr_t) i2 + input_offset);
}
if XNN_UNPREDICTABLE(i3 != zero) {
i3 = (const uint8_t*) ((uintptr_t) i3 + input_offset);
}
if XNN_UNPREDICTABLE(i4 != zero) {
i4 = (const uint8_t*) ((uintptr_t) i4 + input_offset);
}
if XNN_UNPREDICTABLE(i5 != zero) {
i5 = (const uint8_t*) ((uintptr_t) i5 + input_offset);
}
if XNN_UNPREDICTABLE(i6 != zero) {
i6 = (const uint8_t*) ((uintptr_t) i6 + input_offset);
}
if XNN_UNPREDICTABLE(i7 != zero) {
i7 = (const uint8_t*) ((uintptr_t) i7 + input_offset);
}
size_t c = channels;
int32_t* b = buffer;
while (c >= 8) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0); i0 += 8;
const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1); i1 += 8;
const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2); i2 += 8;
const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3); i3 += 8;
const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4); i4 += 8;
const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5); i5 += 8;
const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6); i6 += 8;
const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7); i7 += 8;
__m128i vacc_lo = _mm_load_si128((const __m128i*) b);
__m128i vacc_hi = _mm_load_si128((const __m128i*) b + 1);
b += 8;
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
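        // Requantize: convert to float, scale, clamp the upper bound before rounding back
        // to int32, add the output zero point with signed saturation, pack to unsigned
        // 8 bits, and apply the lower-bound clamp.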
__m128 vfpacc_lo = _mm_cvtepi32_ps(vacc_lo);
__m128 vfpacc_hi = _mm_cvtepi32_ps(vacc_hi);
vfpacc_lo = _mm_mul_ps(vfpacc_lo, vscale);
vfpacc_hi = _mm_mul_ps(vfpacc_hi, vscale);
vfpacc_lo = _mm_min_ps(vfpacc_lo, voutput_max_less_zero_point);
vfpacc_hi = _mm_min_ps(vfpacc_hi, voutput_max_less_zero_point);
vacc_lo = _mm_cvtps_epi32(vfpacc_lo);
vacc_hi = _mm_cvtps_epi32(vfpacc_hi);
__m128i vout = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
vout = _mm_packus_epi16(vout, vout);
vout = _mm_max_epu8(vout, voutput_min);
_mm_storel_epi64((__m128i*) output, vout);
output += 8;
c -= 8;
}
if (c != 0) {
const __m128i vi0 = _mm_loadl_epi64((const __m128i*) i0);
const __m128i vi1 = _mm_loadl_epi64((const __m128i*) i1);
const __m128i vi2 = _mm_loadl_epi64((const __m128i*) i2);
const __m128i vi3 = _mm_loadl_epi64((const __m128i*) i3);
const __m128i vi4 = _mm_loadl_epi64((const __m128i*) i4);
const __m128i vi5 = _mm_loadl_epi64((const __m128i*) i5);
const __m128i vi6 = _mm_loadl_epi64((const __m128i*) i6);
const __m128i vi7 = _mm_loadl_epi64((const __m128i*) i7);
__m128i vacc_lo = _mm_load_si128((const __m128i*) b);
__m128i vacc_hi = _mm_load_si128((const __m128i*) b + 1);
const __m128i vxi0 = _mm_unpacklo_epi8(vi0, vzero);
const __m128i vxi1 = _mm_unpacklo_epi8(vi1, vzero);
const __m128i vxi2 = _mm_unpacklo_epi8(vi2, vzero);
const __m128i vxi3 = _mm_unpacklo_epi8(vi3, vzero);
const __m128i vxi4 = _mm_unpacklo_epi8(vi4, vzero);
const __m128i vxi5 = _mm_unpacklo_epi8(vi5, vzero);
const __m128i vxi6 = _mm_unpacklo_epi8(vi6, vzero);
const __m128i vxi7 = _mm_unpacklo_epi8(vi7, vzero);
const __m128i vsum01 = _mm_add_epi16(vxi0, vxi1);
const __m128i vsum23 = _mm_add_epi16(vxi2, vxi3);
const __m128i vsum45 = _mm_add_epi16(vxi4, vxi5);
const __m128i vsum67 = _mm_add_epi16(vxi6, vxi7);
const __m128i vsum0123 = _mm_add_epi16(vsum01, vsum23);
const __m128i vsum4567 = _mm_add_epi16(vsum45, vsum67);
const __m128i vsum = _mm_add_epi16(vsum0123, vsum4567);
vacc_lo = _mm_add_epi32(vacc_lo, _mm_unpacklo_epi16(vsum, vzero));
vacc_hi = _mm_add_epi32(vacc_hi, _mm_unpackhi_epi16(vsum, vzero));
__m128 vfpacc_lo = _mm_cvtepi32_ps(vacc_lo);
__m128 vfpacc_hi = _mm_cvtepi32_ps(vacc_hi);
vfpacc_lo = _mm_mul_ps(vfpacc_lo, vscale);
vfpacc_hi = _mm_mul_ps(vfpacc_hi, vscale);
vfpacc_lo = _mm_min_ps(vfpacc_lo, voutput_max_less_zero_point);
vfpacc_hi = _mm_min_ps(vfpacc_hi, voutput_max_less_zero_point);
vacc_lo = _mm_cvtps_epi32(vfpacc_lo);
vacc_hi = _mm_cvtps_epi32(vfpacc_hi);
__m128i vout = _mm_adds_epi16(_mm_packs_epi32(vacc_lo, vacc_hi), voutput_zero_point);
vout = _mm_packus_epi16(vout, vout);
vout = _mm_max_epu8(vout, voutput_min);
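        // Store the final 1-7 channels in 4-, 2-, and 1-byte chunks.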
if (c & 4) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout));
output += 4;
vout = _mm_srli_epi64(vout, 32);
}
if (c & 2) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout, 0));
output += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (c & 1) {
*output = (uint8_t) _mm_cvtsi128_si32(vout);
output += 1;
}
}
}
output = (uint8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
file_length: 15,791 | avg_line_length: 38.381546 | max_line_length: 106 | extension_type: c