| repo (stringlengths 1–152, ⌀) | file (stringlengths 14–221) | code (stringlengths 501–25k) | file_length (int64 501–25k) | avg_line_length (float64 20–99.5) | max_line_length (int64 21–134) | extension_type (stringclasses 2 values) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-armsimd32-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__armsimd32_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x2_t vinput_zero_point = (uint16x2_t) params->armsimd32.input_zero_point;
const int16x2_t vpositive_multiplier = (int16x2_t) params->armsimd32.positive_multiplier;
const int16x2_t vnegative_multiplier = (int16x2_t) params->armsimd32.negative_multiplier;
const int32_t vbias = params->armsimd32.bias;
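// Note on the ARMv6 SIMD32 trick used below: __usub16 computes two 16-bit
// differences (zero_point - x) and sets the APSR.GE flags per lane; __sel then
// picks, per lane, between the negative- and positive-slope multipliers based
// on those flags. __smlabb/__smlatt multiply the bottom/top 16-bit lanes and
// accumulate into the 32-bit bias, and __usat(..., 8) clamps the >>8 result to
// the unsigned 8-bit output range.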
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
input += 4;
uint16x2_t vx02 = __uxtb16(vx0123);
uint16x2_t vx13 = __uxtb16(__ror(vx0123, 8));
vx02 = __usub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __usub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 8), 8);
vacc1 = __usat(math_asr_s32(vacc1, 8), 8);
vacc2 = __usat(math_asr_s32(vacc2, 8), 8);
vacc3 = __usat(math_asr_s32(vacc3, 8), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output += 4;
}
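// Remainder of 1-3 bytes: a full 4-byte load is still issued (the kernel is
// annotated XNN_OOB_READS to permit reading past the end of the batch), but
// only the valid 1-3 output bytes are stored below.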
if XNN_UNLIKELY(batch != 0) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
uint16x2_t vx02 = __uxtb16(vx0123);
uint16x2_t vx13 = __uxtb16(__ror(vx0123, 8));
vx02 = __usub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __usub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
const int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 8), 8);
vacc1 = __usat(math_asr_s32(vacc1, 8), 8);
if (batch & (2 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
vacc0 = __usat(math_asr_s32(vacc2, 8), 8);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
}
}
}
| 3,324 | 35.141304 | 122 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-armsimd32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__armsimd32_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x2_t vinput_zero_point = (uint16x2_t) params->armsimd32.input_zero_point;
const int16x2_t vpositive_multiplier = (int16x2_t) params->armsimd32.positive_multiplier;
const int16x2_t vnegative_multiplier = (int16x2_t) params->armsimd32.negative_multiplier;
const int32_t vbias = params->armsimd32.bias;
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_indexed_load_u32(input, 0);
const uint8x4_t vx4567 = (uint8x4_t) unaligned_indexed_load_u32(input, 1);
input += 8;
uint16x2_t vx02 = __uxtb16(vx0123);
uint16x2_t vx13 = __uxtb16(__ror(vx0123, 8));
uint16x2_t vx46 = __uxtb16(vx4567);
uint16x2_t vx57 = __uxtb16(__ror(vx4567, 8));
vx02 = __usub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __usub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx46 = __usub16(vinput_zero_point, vx46);
const int16x2_t vmultiplier46 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx57 = __usub16(vinput_zero_point, vx57);
const int16x2_t vmultiplier57 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
int32_t vacc4 = __smlabb(vmultiplier46, vx46, vbias);
int32_t vacc5 = __smlabb(vmultiplier57, vx57, vbias);
int32_t vacc6 = __smlatt(vmultiplier46, vx46, vbias);
int32_t vacc7 = __smlatt(vmultiplier57, vx57, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 8), 8);
vacc1 = __usat(math_asr_s32(vacc1, 8), 8);
vacc2 = __usat(math_asr_s32(vacc2, 8), 8);
vacc3 = __usat(math_asr_s32(vacc3, 8), 8);
vacc4 = __usat(math_asr_s32(vacc4, 8), 8);
vacc5 = __usat(math_asr_s32(vacc5, 8), 8);
vacc6 = __usat(math_asr_s32(vacc6, 8), 8);
vacc7 = __usat(math_asr_s32(vacc7, 8), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output[4] = (uint8_t) vacc4;
output[5] = (uint8_t) vacc5;
output[6] = (uint8_t) vacc6;
output[7] = (uint8_t) vacc7;
output += 8;
}
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
input += 4;
uint16x2_t vx02 = __uxtb16(vx0123);
uint16x2_t vx13 = __uxtb16(__ror(vx0123, 8));
vx02 = __usub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __usub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
int32_t vacc3 = __smlatt(vmultiplier13, vx13, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 8), 8);
vacc1 = __usat(math_asr_s32(vacc1, 8), 8);
vacc2 = __usat(math_asr_s32(vacc2, 8), 8);
vacc3 = __usat(math_asr_s32(vacc3, 8), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
uint16x2_t vx02 = __uxtb16(vx0123);
uint16x2_t vx13 = __uxtb16(__ror(vx0123, 8));
vx02 = __usub16(vinput_zero_point, vx02);
const int16x2_t vmultiplier02 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
vx13 = __usub16(vinput_zero_point, vx13);
const int16x2_t vmultiplier13 = (int16x2_t) __sel((uint8x4_t) vnegative_multiplier, (uint8x4_t) vpositive_multiplier);
int32_t vacc0 = __smlabb(vmultiplier02, vx02, vbias);
int32_t vacc1 = __smlabb(vmultiplier13, vx13, vbias);
const int32_t vacc2 = __smlatt(vmultiplier02, vx02, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 8), 8);
vacc1 = __usat(math_asr_s32(vacc1, 8), 8);
if (batch & (2 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
vacc0 = __usat(math_asr_s32(vacc2, 8), 8);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
}
}
}
| 5,555 | 38.971223 | 122 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
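// SSE4/AVX scheme used below: bytes are zero-extended to 16 bits, compared
// against the input zero point (_mm_cmpgt_epi16) to build a per-lane mask, and
// _mm_blendv_epi8 selects the positive- or negative-slope multiplier from that
// mask. Shifting the (zero_point - x) difference left by 7 and applying
// _mm_mulhrs_epi16 (rounded Q15 high multiply) is equivalent to a rounded
// arithmetic shift of the 16x16 product by 8; the saturating add of the output
// zero point and _mm_packus_epi16 then produce clamped uint8 results.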
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vmultiplier0 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier1 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
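// Store the 1-7 remaining bytes with progressively narrower stores, shifting
// the packed vector right after each store so the next bytes move into place.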
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,845 | 36.705882 | 104 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier1 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier2 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier2);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier3 = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier3);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,850 | 40.110169 | 104 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->avx.input_zero_point);
const __m128i vpositive_multiplier = _mm_load_si128((const __m128i*) params->avx.positive_multiplier);
const __m128i vnegative_multiplier = _mm_load_si128((const __m128i*) params->avx.negative_multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->avx.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,736 | 35.013158 | 104 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx2_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
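// Same selection-and-mulhrs scheme as the SSE4-template kernels, widened to
// 256 bits: _mm256_cvtepu8_epi16 zero-extends 16 bytes at once, and the result
// is narrowed back through a 128-bit _mm_packus_epi16 on the two halves.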
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,133 | 36.759036 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx2-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx2_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m256i vacc0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
input += 32;
__m256i vmultiplier0 = _mm256_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
__m256i vmultiplier1 = _mm256_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vmultiplier0 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vmultiplier1 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
__m256i vy0 = _mm256_packus_epi16(vacc0, vacc1);
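// _mm256_packus_epi16 packs within each 128-bit lane, interleaving the two
// accumulators; the permute4x64 below restores the original byte order.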
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 4,351 | 38.207207 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-avx2-x64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__avx2_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vpositive_multiplier = _mm256_load_si256((const __m256i*) params->avx2.positive_multiplier);
const __m256i vnegative_multiplier = _mm256_load_si256((const __m256i*) params->avx2.negative_multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m256i vacc0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
__m256i vacc2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 32)));
__m256i vacc3 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 48)));
input += 64;
__m256i vmultiplier0 = _mm256_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
__m256i vmultiplier1 = _mm256_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
__m256i vmultiplier2 = _mm256_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm256_sub_epi16(vinput_zero_point, vacc2);
__m256i vmultiplier3 = _mm256_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm256_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier0);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vmultiplier1 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier1);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vmultiplier2 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier2);
vacc2 = _mm256_slli_epi16(vacc2, 7);
vmultiplier3 = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier3);
vacc3 = _mm256_slli_epi16(vacc3, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm256_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm256_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm256_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm256_adds_epi16(vacc3, voutput_zero_point);
__m256i vy0 = _mm256_packus_epi16(vacc0, vacc1);
__m256i vy1 = _mm256_packus_epi16(vacc2, vacc3);
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
vy1 = _mm256_permute4x64_epi64(vy1, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vmultiplier = _mm256_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm256_blendv_epi8(vnegative_multiplier, vpositive_multiplier, vmultiplier);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,464 | 41.695313 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-neon-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__neon_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(&params->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(&params->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
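// NEON scheme used below: vsubw_u8 widens each uint8 lane and subtracts it
// from the input zero point in one step; vcltq_s16 builds a sign mask that
// vbslq_s16 uses to pick the positive- or negative-slope multiplier. Shifting
// the difference left by 7 and applying vqrdmulhq_s16 (saturating rounding
// doubling high multiply) yields a rounded >>8 of the product, and
// vqmovun_s16 narrows the re-zero-pointed result to uint8 with saturation.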
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
const uint16x8_t vmask0 = vcltq_s16(vacc0, vmovq_n_s16(0));
const uint16x8_t vmask1 = vcltq_s16(vacc1, vmovq_n_s16(0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
const int16x8_t vmultiplier0 = vbslq_s16(vmask0, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier1 = vbslq_s16(vmask1, vpositive_multiplier, vnegative_multiplier);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier0);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier1);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
vst1q_u8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
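// Store the 1-7 remaining bytes with narrowing lane stores, rotating the
// vector with vext after each partial store so the next bytes land in lane 0.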
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
| 3,610 | 37.010526 | 97 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-neon-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__neon_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(&params->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(&params->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
const uint8x16_t vx1 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
int16x8_t vacc2 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx1)));
int16x8_t vacc3 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx1)));
const uint16x8_t vmask0 = vcltq_s16(vacc0, vmovq_n_s16(0));
const uint16x8_t vmask1 = vcltq_s16(vacc1, vmovq_n_s16(0));
const uint16x8_t vmask2 = vcltq_s16(vacc2, vmovq_n_s16(0));
const uint16x8_t vmask3 = vcltq_s16(vacc3, vmovq_n_s16(0));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
const int16x8_t vmultiplier0 = vbslq_s16(vmask0, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier1 = vbslq_s16(vmask1, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier2 = vbslq_s16(vmask2, vpositive_multiplier, vnegative_multiplier);
const int16x8_t vmultiplier3 = vbslq_s16(vmask3, vpositive_multiplier, vnegative_multiplier);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier0);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier1);
vacc2 = vqrdmulhq_s16(vacc2, vmultiplier2);
vacc3 = vqrdmulhq_s16(vacc3, vmultiplier3);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
vacc2 = vqaddq_s16(vacc2, voutput_zero_point);
vacc3 = vqaddq_s16(vacc3, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
const uint8x16_t vy1 = vcombine_u8(vqmovun_s16(vacc2), vqmovun_s16(vacc3));
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
| 4,565 | 40.509091 | 97 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-neon-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__neon_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
const int16x8_t vpositive_multiplier = vld1q_dup_s16(&params->neon.positive_multiplier);
const int16x8_t vnegative_multiplier = vld1q_dup_s16(&params->neon.negative_multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
const uint16x8_t vmask = vcltq_s16(vacc, vmovq_n_s16(0));
vacc = vshlq_n_s16(vacc, 7);
const int16x8_t vmultiplier = vbslq_s16(vmask, vpositive_multiplier, vnegative_multiplier);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
| 2,569 | 35.714286 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-andxor-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_andxor_x1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
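// Branch-free multiplier selection: math_asr_s32(vacc, 31) is 0 for
// non-negative values and all-ones for negative ones, so ANDing with
// multiplier_diff and XORing into multiplier_base yields multiplier_base on
// the positive side and multiplier_base ^ multiplier_diff on the negative
// side, without any data-dependent branch.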
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
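For orientation, every qu8-vlrelu kernel in this table approximates the same per-element computation. A minimal scalar reference sketch follows; the helper name is hypothetical and not part of XNNPACK, and the slopes are assumed to already fold in the input/output quantization scale ratio:

#include <math.h>
#include <stdint.h>

// Hypothetical reference model of QU8 leaky-relu: center on the input zero
// point, scale by the positive or negative slope, re-center on the output
// zero point, and clamp to the uint8 range. The optimized kernels above and
// below implement this with fixed-point multipliers instead of floats.
static uint8_t qu8_lrelu_ref(uint8_t x, int32_t input_zero_point,
                             float positive_slope, float negative_slope,
                             int32_t output_zero_point) {
  const int32_t dx = (int32_t) x - input_zero_point;
  const float slope = dx >= 0 ? positive_slope : negative_slope;
  const long y = lrintf((float) dx * slope) + output_zero_point;
  return (uint8_t) (y < 0 ? 0 : y > 255 ? 255 : y);
}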
| 1,341 | 29.5 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-andxor-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_andxor_x2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
input += 2;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
int32_t vmultiplier0 = math_asr_s32(vacc0, 31);
int32_t vmultiplier1 = math_asr_s32(vacc1, 31);
vmultiplier0 &= vmultiplier_diff;
vmultiplier1 &= vmultiplier_diff;
vmultiplier0 ^= vmultiplier_base;
vmultiplier1 ^= vmultiplier_base;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output = (uint8_t) vout;
}
}
| 2,216 | 28.56 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-andxor-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-andxor.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_andxor_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_andxor.input_zero_point;
const int32_t vmultiplier_diff = params->scalar_andxor.multiplier_diff;
const int32_t vmultiplier_base = params->scalar_andxor.multiplier_base;
const int32_t vbias = params->scalar_andxor.bias;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
int32_t vacc2 = (int32_t) input[2];
int32_t vacc3 = (int32_t) input[3];
input += 4;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
vacc2 -= vinput_zero_point;
vacc3 -= vinput_zero_point;
int32_t vmultiplier0 = math_asr_s32(vacc0, 31);
int32_t vmultiplier1 = math_asr_s32(vacc1, 31);
int32_t vmultiplier2 = math_asr_s32(vacc2, 31);
int32_t vmultiplier3 = math_asr_s32(vacc3, 31);
vmultiplier0 &= vmultiplier_diff;
vmultiplier1 &= vmultiplier_diff;
vmultiplier2 &= vmultiplier_diff;
vmultiplier3 &= vmultiplier_diff;
vmultiplier0 ^= vmultiplier_base;
vmultiplier1 ^= vmultiplier_base;
vmultiplier2 ^= vmultiplier_base;
vmultiplier3 ^= vmultiplier_base;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
vacc2 = vbias + vacc2 * vmultiplier2;
vacc3 = vbias + vacc3 * vmultiplier3;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout2 = math_max_s32(vout2, 0);
vout3 = math_max_s32(vout3, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
vout2 = math_min_s32(vout2, 255);
vout3 = math_min_s32(vout3, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = vmultiplier_base ^ (vmultiplier_diff & math_asr_s32(vacc, 31));
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 3,086 | 30.181818 | 97 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-select-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_select_x1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
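// XNN_UNPREDICTABLE marks the sign test as data-dependent, steering the
// compiler toward a conditional move rather than a branch that would
// mispredict frequently on mixed-sign data.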
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 1,369 | 30.136364 | 107 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-select-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_select_x2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
input += 2;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
const int32_t vmultiplier0 = XNN_UNPREDICTABLE(vacc0 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier1 = XNN_UNPREDICTABLE(vacc1 >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output = (uint8_t) vout;
}
}
| 2,206 | 30.985507 | 109 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-scalar-select-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/scalar-select.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vlrelu.h>
void xnn_qu8_vlrelu_ukernel__scalar_select_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vinput_zero_point = params->scalar_select.input_zero_point;
const int32_t vpositive_multiplier = params->scalar_select.positive_multiplier;
const int32_t vnegative_multiplier = params->scalar_select.negative_multiplier;
const int32_t vbias = params->scalar_select.bias;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) input[0];
int32_t vacc1 = (int32_t) input[1];
int32_t vacc2 = (int32_t) input[2];
int32_t vacc3 = (int32_t) input[3];
input += 4;
vacc0 -= vinput_zero_point;
vacc1 -= vinput_zero_point;
vacc2 -= vinput_zero_point;
vacc3 -= vinput_zero_point;
const int32_t vmultiplier0 = XNN_UNPREDICTABLE(vacc0 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier1 = XNN_UNPREDICTABLE(vacc1 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier2 = XNN_UNPREDICTABLE(vacc2 >= 0) ? vpositive_multiplier : vnegative_multiplier;
const int32_t vmultiplier3 = XNN_UNPREDICTABLE(vacc3 >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc0 = vbias + vacc0 * vmultiplier0;
vacc1 = vbias + vacc1 * vmultiplier1;
vacc2 = vbias + vacc2 * vmultiplier2;
vacc3 = vbias + vacc3 * vmultiplier3;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout2 = math_max_s32(vout2, 0);
vout3 = math_max_s32(vout3, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
vout2 = math_min_s32(vout2, 255);
vout3 = math_min_s32(vout3, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = (int32_t) *input++ - vinput_zero_point;
const int32_t vmultiplier = XNN_UNPREDICTABLE(vacc >= 0) ? vpositive_multiplier : vnegative_multiplier;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 3,040 | 33.168539 | 109 | c |
| XNNPACK | XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-sse2-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_vlrelu_ukernel__sse2_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
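// SSE2 lacks _mm_mulhrs_epi16, so the loop below reconstructs the rounded >>8
// of each 16x16 product from its halves: the low half (_mm_mullo_epi16) is
// shifted right by 7 and rounded via _mm_avg_epu16 against zero (which adds
// the rounding bit), the high half (_mm_mulhi_epi16) is shifted left by 8,
// and the two are summed before the output zero point is added back.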
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
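  // Tail: load a full vector (the kernel is declared XNN_OOB_READS, so overreading
  // the input is permitted) and store 8/4/2/1 bytes as the remaining batch dictates.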
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc0, vacc1);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy0);
vy0 >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy0;
}
}
}
| 4,992
| 35.445255
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-sse2-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>

void xnn_qu8_vlrelu_ukernel__sse2_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
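    // 2x-unrolled main loop: two 16-byte loads widened into four 8-lane halves.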
__m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
__m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);
__m128i vextx2 = _mm_unpacklo_epi8(vx1, vzero);
__m128i vextx3 = _mm_unpackhi_epi8(vx1, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vextx2, vinput_zero_point);
vextx2 = _mm_sub_epi16(vinput_zero_point, vextx2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vextx3, vinput_zero_point);
vextx3 = _mm_sub_epi16(vinput_zero_point, vextx3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
__m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier2);
__m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier3);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodlo2 = _mm_srli_epi16(vprodlo2, 7);
__m128i vprodhi2 = _mm_mulhi_epi16(vextx2, vmultiplier2);
vprodlo3 = _mm_srli_epi16(vprodlo3, 7);
__m128i vprodhi3 = _mm_mulhi_epi16(vextx3, vmultiplier3);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
vprodhi2 = _mm_slli_epi16(vprodhi2, 8);
vprodlo2 = _mm_avg_epu16(vprodlo2, vzero);
vprodhi3 = _mm_slli_epi16(vprodhi3, 8);
vprodlo3 = _mm_avg_epu16(vprodlo3, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
__m128i vacc2 = _mm_add_epi16(vprodlo2, vprodhi2);
__m128i vacc3 = _mm_add_epi16(vprodlo3, vprodhi3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
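  // Drain any remaining 16-byte block before the byte-granular tail.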
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vextx0 = _mm_unpacklo_epi8(vx, vzero);
__m128i vextx1 = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vextx0, vinput_zero_point);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vextx1, vinput_zero_point);
vextx0 = _mm_sub_epi16(vinput_zero_point, vextx0);
vextx1 = _mm_sub_epi16(vinput_zero_point, vextx1);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
__m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier0);
__m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier1);
vprodlo0 = _mm_srli_epi16(vprodlo0, 7);
vprodlo1 = _mm_srli_epi16(vprodlo1, 7);
__m128i vprodhi0 = _mm_mulhi_epi16(vextx0, vmultiplier0);
__m128i vprodhi1 = _mm_mulhi_epi16(vextx1, vmultiplier1);
vprodhi0 = _mm_slli_epi16(vprodhi0, 8);
vprodhi1 = _mm_slli_epi16(vprodhi1, 8);
vprodlo0 = _mm_avg_epu16(vprodlo0, vzero);
vprodlo1 = _mm_avg_epu16(vprodlo1, vzero);
__m128i vacc0 = _mm_add_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_add_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc0, vacc1);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy0 = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy0);
vy0 >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy0;
}
}
}
| 8,179
| 38.708738
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-sse41-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>

void xnn_qu8_vlrelu_ukernel__sse41_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
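    // Pre-shifting left by 7 makes _mm_mulhrs_epi16, which computes (a * b + 0x4000) >> 15,
    // return the rounded Q8 product ((zero_point - x) * m + 0x80) >> 8.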
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 3,983
| 36.584906
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-sse41-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>

void xnn_qu8_vlrelu_ukernel__sse41_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vacc3 = _mm_slli_epi16(vacc3, 7);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 5,064
| 39.846774
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-sse41-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vlrelu.h>

void xnn_qu8_vlrelu_ukernel__sse41_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vmultiplier = _mm_cmpgt_epi16(vacc, vinput_zero_point);
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vmultiplier = _mm_and_si128(vmultiplier, vmultiplier_diff);
vacc = _mm_slli_epi16(vacc, 7);
vmultiplier = _mm_xor_si128(vmultiplier, vmultiplier_base);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
| 2,798
| 34.884615
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-ssse3-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>

void xnn_qu8_vlrelu_ukernel__ssse3_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
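    // SSSE3 provides _mm_mulhrs_epi16 but not _mm_cvtepu8_epi16, so the inputs are
    // widened by unpacking with a zero vector instead.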
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}
| 4,206
| 39.066667
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-ssse3-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vlrelu.h>
#include <xnnpack/unaligned.h>

void xnn_qu8_vlrelu_ukernel__ssse3_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->sse2.input_zero_point);
const __m128i vmultiplier_diff = _mm_load_si128((const __m128i*) params->sse2.multiplier_diff);
const __m128i vmultiplier_base = _mm_load_si128((const __m128i*) params->sse2.multiplier_base);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vzero);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vzero);
__m128i vacc2 = _mm_unpacklo_epi8(vx1, vzero);
__m128i vacc3 = _mm_unpackhi_epi8(vx1, vzero);
__m128i vmultiplier0 = _mm_cmpgt_epi16(vacc0, vinput_zero_point);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
__m128i vmultiplier1 = _mm_cmpgt_epi16(vacc1, vinput_zero_point);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
__m128i vmultiplier2 = _mm_cmpgt_epi16(vacc2, vinput_zero_point);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
__m128i vmultiplier3 = _mm_cmpgt_epi16(vacc3, vinput_zero_point);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vmultiplier0 = _mm_and_si128(vmultiplier0, vmultiplier_diff);
vacc0 = _mm_slli_epi16(vacc0, 7);
vmultiplier0 = _mm_xor_si128(vmultiplier0, vmultiplier_base);
vmultiplier1 = _mm_and_si128(vmultiplier1, vmultiplier_diff);
vacc1 = _mm_slli_epi16(vacc1, 7);
vmultiplier1 = _mm_xor_si128(vmultiplier1, vmultiplier_base);
vmultiplier2 = _mm_and_si128(vmultiplier2, vmultiplier_diff);
vacc2 = _mm_slli_epi16(vacc2, 7);
vmultiplier2 = _mm_xor_si128(vmultiplier2, vmultiplier_base);
vmultiplier3 = _mm_and_si128(vmultiplier3, vmultiplier_diff);
vacc3 = _mm_slli_epi16(vacc3, 7);
vmultiplier3 = _mm_xor_si128(vmultiplier3, vmultiplier_base);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier0);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier1);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier2);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier3);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
__m128i vmultiplier_lo = _mm_cmpgt_epi16(vacc_lo, vinput_zero_point);
__m128i vmultiplier_hi = _mm_cmpgt_epi16(vacc_hi, vinput_zero_point);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vmultiplier_lo = _mm_and_si128(vmultiplier_lo, vmultiplier_diff);
vmultiplier_hi = _mm_and_si128(vmultiplier_hi, vmultiplier_diff);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vmultiplier_lo = _mm_xor_si128(vmultiplier_lo, vmultiplier_base);
vmultiplier_hi = _mm_xor_si128(vmultiplier_hi, vmultiplier_base);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier_lo);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier_hi);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}
| 6,473
| 41.038961
| 101
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmrelaxedsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_arm_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
input += 16;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
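    // The arithmetic shift by 15 turns each lane into an all-ones/all-zeros sign mask;
    // relaxed laneselect then picks the positive- or negative-slope multiplier per lane.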
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
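    // Store the low lanes piecewise: 4, 2, then 1 byte, shifting consumed lanes out.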
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,871
| 37.72
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmrelaxedsimd-arm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_arm_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
input += 32;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
v128_t vacc2 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx1));
v128_t vacc3 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx1));
v128_t vmultiplier2 = wasm_i16x8_shr(vacc2, 15);
v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier2);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier3);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier2);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = __builtin_wasm_relaxed_laneselect_i16x8(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,878
| 41.426087
| 117
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmrelaxedsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
input += 16;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
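    // Same mask & diff ^ base multiplier selection as the SSE kernels above, expressed
    // with wasm SIMD primitives.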
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,932
| 36.457143
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmrelaxedsimd-x86-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
v128_t vacc2 = wasm_u16x8_load8x8(input + 16);
v128_t vacc3 = wasm_u16x8_load8x8(input + 24);
input += 32;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
v128_t vmultiplier2 = wasm_i16x8_gt(vacc2, vinput_zero_point);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vmultiplier2 = wasm_v128_and(vmultiplier2, vmultiplier_diff);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_xor(vmultiplier2, vmultiplier_base);
vmultiplier3 = wasm_v128_and(vmultiplier3, vmultiplier_diff);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_xor(vmultiplier3, vmultiplier_base);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier0);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier1);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier2);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,983
| 39.520325
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmrelaxedsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmrelaxedsimd_x86_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,775
| 35.051948
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmsimd-arm-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmsimd_arm_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
input += 16;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
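    // Portable counterpart of the relaxed kernels above: wasm_v128_bitselect and the
    // saturating wasm_i16x8_q15mulr_sat replace the relaxed laneselect/q15mulr calls.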
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,720
| 36.21
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmsimd-arm-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-arm.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>

void xnn_qu8_vlrelu_ukernel__wasmsimd_arm_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.input_zero_point);
const v128_t vpositive_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.positive_multiplier);
const v128_t vnegative_multiplier = wasm_v128_load64_splat(params->wasmsimd_arm.negative_multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_arm.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vx0 = wasm_v128_load(input);
v128_t vx1 = wasm_v128_load(input + 16);
input += 32;
v128_t vacc0 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx0));
v128_t vacc1 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx0));
v128_t vmultiplier0 = wasm_i16x8_shr(vacc0, 15);
v128_t vmultiplier1 = wasm_i16x8_shr(vacc1, 15);
v128_t vacc2 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_low_u8x16(vx1));
v128_t vacc3 = wasm_i16x8_sub(vinput_zero_point, wasm_u16x8_extend_high_u8x16(vx1));
v128_t vmultiplier2 = wasm_i16x8_shr(vacc2, 15);
v128_t vmultiplier3 = wasm_i16x8_shr(vacc3, 15);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier0);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier1);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier2);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier3);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier2);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const v128_t vx = wasm_u16x8_load8x8(input);
v128_t vacc = wasm_i16x8_sub(vinput_zero_point, vx);
v128_t vmultiplier = wasm_i16x8_shr(vacc, 15);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_bitselect(vpositive_multiplier, vnegative_multiplier, vmultiplier);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,655
| 39.486957
| 103
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmsimd-x86-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
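  // x86 variant: the slope is selected per lane as
  // (mask & multiplier_diff) ^ multiplier_base, i.e. multiplier_base where
  // x <= input_zero_point and multiplier_base ^ multiplier_diff elsewhere,
  // which lowers to fewer instructions than v128.bitselect on x86.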
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
input += 16;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 3,861
| 35.780952
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmsimd-x86-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
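  // 4x-unrolled main loop: four 8-lane blocks per iteration, each loaded with
  // a widening 8-byte load.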
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
v128_t vacc2 = wasm_u16x8_load8x8(input + 16);
v128_t vacc3 = wasm_u16x8_load8x8(input + 24);
input += 32;
v128_t vmultiplier0 = wasm_i16x8_gt(vacc0, vinput_zero_point);
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
v128_t vmultiplier1 = wasm_i16x8_gt(vacc1, vinput_zero_point);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
v128_t vmultiplier2 = wasm_i16x8_gt(vacc2, vinput_zero_point);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
v128_t vmultiplier3 = wasm_i16x8_gt(vacc3, vinput_zero_point);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vmultiplier0 = wasm_v128_and(vmultiplier0, vmultiplier_diff);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vmultiplier0 = wasm_v128_xor(vmultiplier0, vmultiplier_base);
vmultiplier1 = wasm_v128_and(vmultiplier1, vmultiplier_diff);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vmultiplier1 = wasm_v128_xor(vmultiplier1, vmultiplier_base);
vmultiplier2 = wasm_v128_and(vmultiplier2, vmultiplier_diff);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vmultiplier2 = wasm_v128_xor(vmultiplier2, vmultiplier_base);
vmultiplier3 = wasm_v128_and(vmultiplier3, vmultiplier_diff);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vmultiplier3 = wasm_v128_xor(vmultiplier3, vmultiplier_base);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier0);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier1);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier2);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier3);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 4,880
| 38.682927
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vlrelu/gen/qu8-vlrelu-wasmsimd-x86-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vlrelu/wasmsimd-x86.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vlrelu_ukernel__wasmsimd_x86_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_lrelu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.input_zero_point);
const v128_t vmultiplier_diff = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_diff);
const v128_t vmultiplier_base = wasm_v128_load64_splat(params->wasmsimd_x86.multiplier_base);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd_x86.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
v128_t vmultiplier = wasm_i16x8_gt(vacc, vinput_zero_point);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vmultiplier = wasm_v128_and(vmultiplier, vmultiplier_diff);
vacc = wasm_i16x8_shl(vacc, 7);
vmultiplier = wasm_v128_xor(vmultiplier, vmultiplier_base);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}
| 2,736
| 34.545455
| 99
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-avx-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
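  // 16x16 -> 32-bit products come from _mm_mullo_epi16/_mm_mulhi_epi16 pairs
  // re-interleaved with unpacklo/unpackhi; requantization converts to float,
  // scales, and relies on _mm_cvtps_epi32 rounding to nearest-even (the
  // default MXCSR rounding mode).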
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
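      // Tail: always compute a full 8-lane block (reads may run past `batch`
      // elements, which XNN_OOB_READS permits) and store 8/4/2/1 bytes.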
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,249
| 42.103448
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__avx_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
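      // Up to 7 trailing elements: one 8-lane computation with possible
      // out-of-bounds reads (allowed by XNN_OOB_READS), stored piecewise.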
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,842
| 39.024793
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neon.a_zero_point);
const uint8x16_t vb_zero_point = vld1q_dup_u8(params->fp32_neon.b_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
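  // fp32 requantization without ARMv8 rounding converts: adding the magic
  // bias constant forces round-to-nearest and leaves the integer result in
  // the low mantissa bits; vqsubq_s32 then strips the bias and folds in the
  // output zero point (magic_bias_less_output_zero_point) with saturation.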
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,554
| 45.067073
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
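  // ld64 variant: two 8-byte loads per 16-element iteration, so vsubl_u8 can
  // widen each half directly without vget_low_u8/vget_high_u8.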
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,483
| 42.516779
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
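      // 1..7 trailing elements: compute a full 8-lane result (out-of-bounds
      // reads are permitted) and store 4/2/1 bytes, rotating the vector
      // between stores with vext_u8.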
const uint8x8_t va01234567 = vld1_u8(input_a);
const uint8x8_t vb01234567 = vld1_u8(input_b);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,890
| 37.511811
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neonv8-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neonv8.a_zero_point);
const uint8x16_t vb_zero_point = vld1q_dup_u8(params->fp32_neonv8.b_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
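  // ARMv8 path: vcvtnq_s32_f32 rounds to nearest-even directly, so no magic
  // bias is needed; the output zero point is added in int16 with saturation.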
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,068
| 43.459119
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neonv8-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
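    // On AArch64, vqmovn_high_s32 narrows straight into the upper half of the
    // result; AArch32 composes the same value with vcombine_s16.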
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,993
| 40.625
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-neonv8-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__neonv8_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
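  // The x8 kernel keeps the clamp bounds in 8-lane registers (vld1_dup_u8),
  // since at most one uint8x8_t result is produced per iteration.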
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const uint8x8_t vb01234567 = vld1_u8(input_b);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,543
| 35.943089
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
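// Float-to-int conversion uses the "magic bias" trick: after clamping, adding
// a large power-of-two bias places the integer result in the low mantissa bits
// of the float, so reinterpreting the bits and subtracting
// vmagic_bias_less_output_zero_point yields the quantized value with the
// output zero point already folded in.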
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 1,800
| 33.634615
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x2(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
input_a += 2;
const int32_t vb0 = input_b[0] - vb_zero_point;
const int32_t vb1 = input_b[1] - vb_zero_point;
input_b += 2;
const int32_t vacc0 = va0 * vb0;
const int32_t vacc1 = va1 * vb1;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = (int32_t) *input_a - va_zero_point;
const int32_t vb = (int32_t) *input_b - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (uint8_t) vout;
}
}
| 2,858
| 34.296296
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__scalar_x4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const int32_t vb_zero_point = params->fp32_scalar.b_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
const int32_t va2 = input_a[2] - va_zero_point;
const int32_t va3 = input_a[3] - va_zero_point;
input_a += 4;
const int32_t vb0 = input_b[0] - vb_zero_point;
const int32_t vb1 = input_b[1] - vb_zero_point;
const int32_t vb2 = input_b[2] - vb_zero_point;
const int32_t vb3 = input_b[3] - vb_zero_point;
input_b += 4;
const int32_t vacc0 = va0 * vb0;
const int32_t vacc1 = va1 * vb1;
const int32_t vacc2 = va2 * vb2;
const int32_t vacc3 = va3 * vb3;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
const int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vb = (int32_t) *input_b++ - vb_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 3,904
| 36.912621
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-sse2-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
input_a += 16;
input_b += 16;
const __m128i vzero = _mm_setzero_si128();
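// SSE2 lacks a u8->u16 zero-extension instruction, so the bytes are widened by
// interleaving with a zero vector.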
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
vb89ABCDEF = _mm_unpacklo_epi8(vb89ABCDEF, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
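// Full signed 32-bit products are reassembled from the low and high 16-bit
// halves produced by _mm_mullo_epi16/_mm_mulhi_epi16.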
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,525
| 41.653595
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-sse2-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 5,058
| 38.834646
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-sse41-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
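// SSE4.1 provides _mm_cvtepu8_epi16 (pmovzxbw), replacing the SSE2 idiom of
// interleaving with a zero vector to widen u8 lanes to u16.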
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vxb89ABCDEF = _mm_sub_epi16(vb89ABCDEF, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb89ABCDEF);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,251
| 42.117241
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-sse41-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxb01234567 = _mm_sub_epi16(vb01234567, vb_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb01234567);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb01234567);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,844
| 39.041322
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-wasmsimd-mul32-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
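// The wasmsimd variant clamps in the biased-float integer domain: vmagic_min
// implements the output_min bound, so only the output_max clamp remains after
// narrowing to u8.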
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_u16x8_load8x8(input_b + 8);
input_a += 16;
input_b += 16;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
const v128_t vxa89ABCDEF = wasm_i16x8_sub(va89ABCDEF, va_zero_point);
const v128_t vxb89ABCDEF = wasm_i16x8_sub(vb89ABCDEF, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
v128_t vacc89AB = wasm_i32x4_extmul_low_i16x8(vxa89ABCDEF, vxb89ABCDEF);
v128_t vaccCDEF = wasm_i32x4_extmul_high_i16x8(vxa89ABCDEF, vxb89ABCDEF);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,066
| 40.554795
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-fp32-wasmsimd-mul32-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxb01234567 = wasm_i16x8_sub(vb01234567, vb_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb01234567);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb01234567);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 4,693
| 38.116667
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-rndnu-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->rndnu_neon.a_zero_point);
const uint8x16_t vb_zero_point = vld1q_dup_u8(params->rndnu_neon.b_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
#endif
const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
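// rndnu requantization: a saturating left pre-shift, a doubling high multiply
// (vqdmulhq_s32) by the fixed-point multiplier, and a rounding post-shift
// (vrshlq_s32) implement round-to-nearest-up scaling in integer arithmetic.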
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;
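// On AArch64 the upper halves are widened directly with vsubl_high_u8; the
// AArch32 path first extracts them with vget_high_u8.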
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,298
| 44.61875
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-rndnu-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb89ABCDEF));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb89ABCDEF));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,225
| 41.937931
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmul/gen/qu8-vmul-minmax-rndnu-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmul/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmul_minmax_rndnu_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const uint8x8_t vb01234567 = vld1_u8(input_b);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb01234567));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb01234567));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,743
| 37.258065
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-avx-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
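  // For vmulc the second operand is a single scalar: broadcast it to every
  // 16-bit lane and subtract its zero point once, outside the batch loop.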
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,767
| 40.496403
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__avx_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
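    // mul16 trick: 16-bit low/high halves of the products are computed
    // separately and interleaved to reconstruct the full 32-bit products.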
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,570
| 37.737288
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neon.a_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
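  // fp32 requantization with the magic-bias trick: adding a large constant
  // fixes the float exponent so the rounded integer lands in the low mantissa
  // bits; a saturating subtract of the biased zero point then recovers it.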
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,718
| 41.796178
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
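  // ld64 variant: two 8-byte loads per iteration instead of one 16-byte load.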
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc89AB = vreinterpretq_s32_f32(vaddq_f32(vfpacc89AB, vmagic_bias));
vaccCDEF = vreinterpretq_s32_f32(vaddq_f32(vfpaccCDEF, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = vqsubq_s32(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = vqsubq_s32(vaccCDEF, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,077
| 40.917241
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neon.a_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neon.scale);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
  const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
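    // At most 7 elements remain for the x8 variant, so one partial pass suffices.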
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0123, vmagic_bias));
vacc4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc4567, vmagic_bias));
vacc0123 = vqsubq_s32(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = vqsubq_s32(vacc4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,673
| 36.392
| 123
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neonv8-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->fp32_neonv8.a_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
#endif
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
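  // NEONv8 provides VCVTN (round-to-nearest-even float-to-int conversion),
  // which replaces the magic-bias rounding needed on ARMv7 NEON.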
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,230
| 39.993421
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neonv8-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->fp32_neonv8.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
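  // The output zero point is added with a saturating 16-bit add before
  // narrowing to uint8, so out-of-range values clamp instead of wrapping.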
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
float32x4_t vfpacc89AB = vcvtq_f32_s32(vacc89AB);
float32x4_t vfpaccCDEF = vcvtq_f32_s32(vaccCDEF);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vfpacc89AB = vmulq_f32(vfpacc89AB, vscale);
vfpaccCDEF = vmulq_f32(vfpaccCDEF, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
vacc89AB = vcvtnq_s32_f32(vfpacc89AB);
vaccCDEF = vcvtnq_s32_f32(vfpaccCDEF);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,587
| 38.914286
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-neonv8-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__neonv8_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->fp32_neonv8.a_zero_point);
  const float32x4_t vscale = vld1q_dup_f32(&params->fp32_neonv8.scale);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->fp32_neonv8.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->fp32_neonv8.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->fp32_neonv8.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
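    // Final clamp against voutput_min/voutput_max applies the minmax activation.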
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
float32x4_t vfpacc0123 = vcvtq_f32_s32(vacc0123);
float32x4_t vfpacc4567 = vcvtq_f32_s32(vacc4567);
vfpacc0123 = vmulq_f32(vfpacc0123, vscale);
vfpacc4567 = vmulq_f32(vfpacc4567, vscale);
vacc0123 = vcvtnq_s32_f32(vfpacc0123);
vacc4567 = vcvtnq_s32_f32(vfpacc4567);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,326
| 34.760331
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
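  // Scalar reference path: clamping happens in float space (min/max already
  // have the zero point subtracted), then the magic-bias addition converts the
  // result to a rounded integer via its bit pattern.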
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 1,751
| 33.352941
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x2(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
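  // Two elements per iteration; a single leftover element is handled after the loop.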
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
input_a += 2;
const int32_t vacc0 = va0 * vb;
const int32_t vacc1 = va1 * vb;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = (int32_t) *input_a - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output = (uint8_t) vout;
}
}
| 2,686
| 34.355263
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__scalar_x4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t va_zero_point = params->fp32_scalar.a_zero_point;
const float vscale = params->fp32_scalar.scale;
const float voutput_min_less_zero_point = params->fp32_scalar.output_min_less_zero_point;
const float voutput_max_less_zero_point = params->fp32_scalar.output_max_less_zero_point;
const float vmagic_bias = params->fp32_scalar.magic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar.magic_bias_less_output_zero_point;
const int32_t vb = (int32_t) *input_b - params->fp32_scalar.b_zero_point;
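  // Four-way unrolled main loop; up to three leftovers fall through to the scalar tail.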
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0] - va_zero_point;
const int32_t va1 = input_a[1] - va_zero_point;
const int32_t va2 = input_a[2] - va_zero_point;
const int32_t va3 = input_a[3] - va_zero_point;
input_a += 4;
const int32_t vacc0 = va0 * vb;
const int32_t vacc1 = va1 * vb;
const int32_t vacc2 = va2 * vb;
const int32_t vacc3 = va3 * vb;
float vfpacc0 = (float) vacc0 * vscale;
float vfpacc1 = (float) vacc1 * vscale;
float vfpacc2 = (float) vacc2 * vscale;
float vfpacc3 = (float) vacc3 * vscale;
vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point);
vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point);
vfpacc2 = math_max_f32(vfpacc2, voutput_min_less_zero_point);
vfpacc3 = math_max_f32(vfpacc3, voutput_min_less_zero_point);
vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point);
vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point);
vfpacc2 = math_min_f32(vfpacc2, voutput_max_less_zero_point);
vfpacc3 = math_min_f32(vfpacc3, voutput_max_less_zero_point);
vfpacc0 += vmagic_bias;
vfpacc1 += vmagic_bias;
vfpacc2 += vmagic_bias;
vfpacc3 += vmagic_bias;
const int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point;
const int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point;
const int32_t vout2 = (int32_t) float_as_uint32(vfpacc2) - vmagic_bias_less_output_zero_point;
const int32_t vout3 = (int32_t) float_as_uint32(vfpacc3) - vmagic_bias_less_output_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = (int32_t) *input_a++ - va_zero_point;
const int32_t vacc = va * vb;
float vfpacc = (float) vacc * vscale;
vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
vfpacc += vmagic_bias;
const int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 3,622
| 36.739583
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-sse2-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
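  // SSE2 has no _mm_cvtepu8_epi16, so bytes are widened to 16 bits by
  // interleaving with a zero vector inside the loop.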
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
input_a += 16;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
          *output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,951
| 40.333333
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-sse2-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
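
// ---------------------------------------------------------------------------
// Editor's note (illustrative sketch, not part of the generated kernel): the
// fp32 requantization above widens the quantized product to float, scales it,
// rounds back to int32 with _mm_cvtps_epi32 (round-to-nearest-even under the
// default MXCSR rounding mode), adds the output zero point with signed
// saturation, and packs with unsigned saturation before the min/max clamp.
// The scalar helper below models one element under those assumptions; its
// name and parameter list are hypothetical, chosen for illustration only.
#include <math.h>    // lrintf, used only by this reference sketch
#include <stdint.h>

static inline uint8_t qu8_mulc_fp32_ref(
  int32_t xa,                 // input_a value minus a_zero_point
  int32_t xb,                 // input_b value minus b_zero_point
  float scale, int32_t output_zero_point,
  uint8_t output_min, uint8_t output_max)
{
  // lrintf rounds to nearest even in the default mode, like _mm_cvtps_epi32.
  int32_t acc = (int32_t) lrintf((float) (xa * xb) * scale) + output_zero_point;
  if (acc < (int32_t) output_min) acc = (int32_t) output_min;
  if (acc > (int32_t) output_max) acc = (int32_t) output_max;
  return (uint8_t) acc;
}
// ---------------------------------------------------------------------------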
| 4,724
| 37.729508
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-sse41-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vxa89ABCDEF = _mm_sub_epi16(va89ABCDEF, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod89ABCDEFlo = _mm_mullo_epi16(vxa89ABCDEF, vxb);
const __m128i vprod89ABCDEFhi = _mm_mulhi_epi16(vxa89ABCDEF, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod89AB = _mm_unpacklo_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
const __m128i vprodCDEF = _mm_unpackhi_epi16(vprod89ABCDEFlo, vprod89ABCDEFhi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
__m128 vfpacc89AB = _mm_cvtepi32_ps(vprod89AB);
__m128 vfpaccCDEF = _mm_cvtepi32_ps(vprodCDEF);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
vfpacc89AB = _mm_mul_ps(vfpacc89AB, vscale);
vfpaccCDEF = _mm_mul_ps(vfpaccCDEF, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
const __m128i vacc89AB = _mm_cvtps_epi32(vfpacc89AB);
const __m128i vaccCDEF = _mm_cvtps_epi32(vfpaccCDEF);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,769
| 40.510791
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-sse41-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.a_zero_point);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->fp32_sse2.output_max);
__m128i vxb = _mm_sub_epi16(
_mm_shuffle_epi32(_mm_cvtsi32_si128(UINT32_C(0x00010001) * (uint32_t) (uint16_t) (int16_t) *input_b), 0),
_mm_load_si128((const __m128i*) params->fp32_sse2.b_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vxa01234567 = _mm_sub_epi16(va01234567, va_zero_point);
const __m128i vprod01234567lo = _mm_mullo_epi16(vxa01234567, vxb);
const __m128i vprod01234567hi = _mm_mulhi_epi16(vxa01234567, vxb);
const __m128i vprod0123 = _mm_unpacklo_epi16(vprod01234567lo, vprod01234567hi);
const __m128i vprod4567 = _mm_unpackhi_epi16(vprod01234567lo, vprod01234567hi);
__m128 vfpacc0123 = _mm_cvtepi32_ps(vprod0123);
__m128 vfpacc4567 = _mm_cvtepi32_ps(vprod4567);
vfpacc0123 = _mm_mul_ps(vfpacc0123, vscale);
vfpacc4567 = _mm_mul_ps(vfpacc4567, vscale);
const __m128i vacc0123 = _mm_cvtps_epi32(vfpacc0123);
const __m128i vacc4567 = _mm_cvtps_epi32(vfpacc4567);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
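
// ---------------------------------------------------------------------------
// Editor's note: this SSE4.1 variant differs from the SSE2 kernels above only
// in how bytes are widened and how tail lanes are extracted.
// _mm_cvtepu8_epi16 zero-extends the low eight bytes in one instruction; on
// SSE2 the same widening takes an unpack against a zero register, as the
// small equivalence sketch below shows (the helper name is hypothetical).
static inline __m128i widen_u8_lo_sse2(__m128i v) {
  // SSE2 equivalent of the SSE4.1 _mm_cvtepu8_epi16(v) used above.
  return _mm_unpacklo_epi8(v, _mm_setzero_si128());
}
// Likewise, _mm_extract_epi16/_mm_extract_epi8 replace the SSE2 pattern of
// _mm_cvtsi128_si32 plus casts for the 2- and 1-byte tail stores.
// ---------------------------------------------------------------------------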
| 4,572
| 37.754237
| 109
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-wasmsimd-mul32-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
const v128_t vxb = wasm_i16x8_sub(
wasm_i16x8_splat((int16_t) *input_b), wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
input_a += 16;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
const v128_t vxa89ABCDEF = wasm_i16x8_sub(va89ABCDEF, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
v128_t vacc89AB = wasm_i32x4_extmul_low_i16x8(vxa89ABCDEF, vxb);
v128_t vaccCDEF = wasm_i32x4_extmul_high_i16x8(vxa89ABCDEF, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc89AB = wasm_f32x4_convert_i32x4(vacc89AB);
vaccCDEF = wasm_f32x4_convert_i32x4(vaccCDEF);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc89AB = wasm_f32x4_mul(vacc89AB, vscale);
vaccCDEF = wasm_f32x4_mul(vaccCDEF, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc89AB = wasm_f32x4_add(vacc89AB, vmagic_bias);
vaccCDEF = wasm_f32x4_add(vaccCDEF, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc89AB = wasm_i32x4_max(vacc89AB, vmagic_min);
vaccCDEF = wasm_i32x4_max(vaccCDEF, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
vacc89AB = wasm_i32x4_sub(vacc89AB, vmagic_bias_less_output_zero_point);
vaccCDEF = wasm_i32x4_sub(vaccCDEF, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout89ABCDEF = wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
input_a += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
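
// ---------------------------------------------------------------------------
// Editor's note: the "magic bias" sequence above converts float to int32
// without a dedicated rounding instruction. Adding 0x1.8p+23f places
// round-to-nearest-even(x) in the low mantissa bits; the i32x4_max against
// vmagic_min then applies the lower clamp while the value is still biased,
// and subtracting vmagic_bias_less_output_zero_point removes the bias and
// adds the output zero point in one step (the upper clamp happens after
// narrowing, via wasm_u8x16_min). A minimal scalar model follows, assuming
// IEEE-754 binary32 floats; the helper name is hypothetical.
#include <stdint.h>
#include <string.h>  // memcpy, for a well-defined bit reinterpretation

static inline int32_t round_via_magic_bias(float x) {
  const float fmagic = 12582912.0f;   // 0x1.8p+23 = 2^23 + 2^22
  const int32_t imagic = 0x4B400000;  // bit pattern of 12582912.0f
  const float biased = x + fmagic;    // integer part now sits in the mantissa
  int32_t bits;
  memcpy(&bits, &biased, sizeof(bits));
  // Valid for x in roughly [-2^22, 2^22), far wider than these 8-bit kernels need.
  return bits - imagic;               // round-to-nearest-even(x)
}
// ---------------------------------------------------------------------------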
| 5,623
| 39.460432
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-fp32-wasmsimd-mul32-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/wasmsimd-mul32-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_fp32_ukernel__wasmsimd_mul32_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.a_zero_point);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
const v128_t vxb = wasm_i16x8_sub(
wasm_i16x8_splat((int16_t) *input_b), wasm_v128_load64_splat(params->fp32_wasmsimd.b_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
input_a += 8;
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vxa01234567 = wasm_i16x8_sub(va01234567, va_zero_point);
v128_t vacc0123 = wasm_i32x4_extmul_low_i16x8(vxa01234567, vxb);
v128_t vacc4567 = wasm_i32x4_extmul_high_i16x8(vxa01234567, vxb);
vacc0123 = wasm_f32x4_convert_i32x4(vacc0123);
vacc4567 = wasm_f32x4_convert_i32x4(vacc4567);
vacc0123 = wasm_f32x4_mul(vacc0123, vscale);
vacc4567 = wasm_f32x4_mul(vacc4567, vscale);
vacc0123 = wasm_f32x4_add(vacc0123, vmagic_bias);
vacc4567 = wasm_f32x4_add(vacc4567, vmagic_bias);
vacc0123 = wasm_i32x4_max(vacc0123, vmagic_min);
vacc4567 = wasm_i32x4_max(vacc4567, vmagic_min);
vacc0123 = wasm_i32x4_sub(vacc0123, vmagic_bias_less_output_zero_point);
vacc4567 = wasm_i32x4_sub(vacc4567, vmagic_bias_less_output_zero_point);
v128_t vout01234567 = wasm_i16x8_narrow_i32x4(vacc0123, vacc4567);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 4,424
| 37.146552
| 132
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-rndnu-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(params->rndnu_neon.a_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
#endif
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
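
// ---------------------------------------------------------------------------
// Editor's note: the rndnu requantization above is a three-step fixed-point
// scale: a saturating left pre-shift (vqshlq_s32), a saturating doubling
// multiply keeping the high 32 bits (vqdmulhq_s32), and a rounding right
// shift (vrshlq_s32 with a negative shift count). One lane is modeled below;
// the helper name is hypothetical and it assumes left_pre_shift >= 0 and
// left_post_shift <= 0, as produced by the parameter setup for this family.
// The INT32_MIN * INT32_MIN saturation corner of vqdmulh is omitted for brevity.
#include <stdint.h>

static inline int32_t rndnu_requantize_ref(
  int32_t acc, int32_t left_pre_shift, int32_t multiplier, int32_t left_post_shift)
{
  // vqshlq_s32: saturating shift left.
  int64_t v = (int64_t) acc << left_pre_shift;
  if (v > INT32_MAX) v = INT32_MAX;
  if (v < INT32_MIN) v = INT32_MIN;
  // vqdmulhq_s32: high 32 bits of the doubled product, (2*a*b) >> 32.
  v = (v * (int64_t) multiplier) >> 31;
  // vrshlq_s32 with a negative count: rounding arithmetic shift right.
  const int32_t shift = -left_post_shift;
  if (shift > 0) {
    v = (v + ((int64_t) 1 << (shift - 1))) >> shift;
  }
  return (int32_t) v;
}
// ---------------------------------------------------------------------------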
| 6,461
| 41.235294
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-rndnu-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
int32x4_t vacc89AB = vmull_s16(vget_low_s16(vxa89ABCDEF), vget_low_s16(vxb));
int32x4_t vaccCDEF = vmull_s16(vget_high_s16(vxa89ABCDEF), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc89AB = vqshlq_s32(vacc89AB, vleft_pre_shift);
vaccCDEF = vqshlq_s32(vaccCDEF, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc89AB = vqdmulhq_s32(vacc89AB, vmultiplier);
vaccCDEF = vqdmulhq_s32(vaccCDEF, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
vacc89AB = vrshlq_s32(vacc89AB, vleft_post_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
int16x8_t vacc89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc89AB), vaccCDEF);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
int16x8_t vacc89ABCDEF = vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
vacc89ABCDEF = vqaddq_s16(vacc89ABCDEF, voutput_zero_point);
#if XNN_ARCH_ARM64
uint8x16_t vout0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc01234567), vacc89ABCDEF);
#else
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
#endif
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,819
| 40.276596
| 106
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vmulc/gen/qu8-vmulc-minmax-rndnu-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vmulc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vmul.h>
void xnn_qu8_vmulc_minmax_rndnu_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_mul_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(params->rndnu_neon.a_zero_point);
  const int32x4_t vleft_pre_shift = vld1q_dup_s32(&params->rndnu_neon.left_pre_shift);
  const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
  const int32x4_t vleft_post_shift = vld1q_dup_s32(&params->rndnu_neon.left_post_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->rndnu_neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->rndnu_neon.output_max);
const uint8x8_t vb = vld1_dup_u8(input_b);
const uint8x8_t vb_zero_point = vld1_dup_u8(params->rndnu_neon.b_zero_point);
const int16x8_t vxb = vreinterpretq_s16_u16(vsubl_u8(vb, vb_zero_point));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
    uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmull_s16(vget_low_s16(vxa01234567), vget_low_s16(vxb));
int32x4_t vacc4567 = vmull_s16(vget_high_s16(vxa01234567), vget_high_s16(vxb));
vacc0123 = vqshlq_s32(vacc0123, vleft_pre_shift);
vacc4567 = vqshlq_s32(vacc4567, vleft_pre_shift);
vacc0123 = vqdmulhq_s32(vacc0123, vmultiplier);
vacc4567 = vqdmulhq_s32(vacc4567, vmultiplier);
vacc0123 = vrshlq_s32(vacc0123, vleft_post_shift);
vacc4567 = vrshlq_s32(vacc4567, vleft_post_shift);
#if XNN_ARCH_ARM64
int16x8_t vacc01234567 = vqmovn_high_s32(vqmovn_s32(vacc0123), vacc4567);
#else
int16x8_t vacc01234567 = vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567));
#endif
vacc01234567 = vqaddq_s16(vacc01234567, voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
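
// ---------------------------------------------------------------------------
// Editor's note: all of the NEON kernels above share the same tail-store
// ladder: write 4, then 2, then 1 byte(s), rotating the vector with vext so
// the next unwritten bytes move into lane 0. A standalone version of that
// pattern is sketched below (the helper name is hypothetical; <arm_neon.h>
// is already included by this file).
#include <stddef.h>
#include <stdint.h>

static inline void store_u8x8_tail(uint8_t* output, uint8x8_t v, size_t n) {
  // n is the number of remaining bytes, 1..7.
  if (n & 4) {
    vst1_lane_u32((void*) output, vreinterpret_u32_u8(v), 0); output += 4;
    v = vext_u8(v, v, 4);  // rotate the written bytes out of lane 0
  }
  if (n & 2) {
    vst1_lane_u16((void*) output, vreinterpret_u16_u8(v), 0); output += 2;
    v = vext_u8(v, v, 2);
  }
  if (n & 1) {
    vst1_lane_u8(output, v, 0);
  }
}
// ---------------------------------------------------------------------------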
| 4,526
| 36.106557
| 95
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__neon_x16(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint16x8_t vmax0 = vdupq_n_u16(0);
uint16x8_t vmax1 = vdupq_n_u16(0);
for (; batch >= 16 * sizeof(int16_t); batch -= 16 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const uint16x8_t vabs0 = vreinterpretq_u16_s16(vabsq_s16(vi0));
const uint16x8_t vabs1 = vreinterpretq_u16_s16(vabsq_s16(vi1));
vmax0 = vmaxq_u16(vmax0, vabs0);
vmax1 = vmaxq_u16(vmax1, vabs1);
}
vmax0 = vmaxq_u16(vmax0, vmax1);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
}
if (batch != 0) {
do {
const int16x8_t vi = vld1q_dup_s16(input); input += 1;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
#if XNN_ARCH_ARM64
*output = vmaxvq_u16(vmax0);
#else
uint16x4_t vmax_lo = vmax_u16(vget_low_u16(vmax0), vget_high_u16(vmax0));
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vst1_lane_u16(output, vmax_lo, 0);
#endif
}
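
// ---------------------------------------------------------------------------
// Editor's note: two details of the reduction above are easy to miss. First,
// vabsq_s16(INT16_MIN) wraps to 0x8000, but reinterpreting the result as
// uint16 reads that pattern as 32768 -- exactly |INT16_MIN| -- so the
// unsigned max is exact without a saturating abs. Second, the unrolled loop
// keeps independent accumulators (vmax0, vmax1) to break the dependency
// chain between iterations, merging them only after the loop. A hypothetical
// scalar model of one lane:
static inline uint16_t abs_s16_as_u16(int16_t x) {
  // Matches vreinterpretq_u16_s16(vabsq_s16(...)) per lane, including INT16_MIN.
  return (uint16_t) (x < 0 ? -(int32_t) x : (int32_t) x);
}
// ---------------------------------------------------------------------------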
| 1,915
| 27.597015
| 77
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__neon_x24(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint16x8_t vmax0 = vdupq_n_u16(0);
uint16x8_t vmax1 = vdupq_n_u16(0);
uint16x8_t vmax2 = vdupq_n_u16(0);
for (; batch >= 24 * sizeof(int16_t); batch -= 24 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const uint16x8_t vabs0 = vreinterpretq_u16_s16(vabsq_s16(vi0));
const uint16x8_t vabs1 = vreinterpretq_u16_s16(vabsq_s16(vi1));
const uint16x8_t vabs2 = vreinterpretq_u16_s16(vabsq_s16(vi2));
vmax0 = vmaxq_u16(vmax0, vabs0);
vmax1 = vmaxq_u16(vmax1, vabs1);
vmax2 = vmaxq_u16(vmax2, vabs2);
}
vmax0 = vmaxq_u16(vmax0, vmax1);
vmax0 = vmaxq_u16(vmax0, vmax2);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
}
if (batch != 0) {
do {
const int16x8_t vi = vld1q_dup_s16(input); input += 1;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
#if XNN_ARCH_ARM64
*output = vmaxvq_u16(vmax0);
#else
uint16x4_t vmax_lo = vmax_u16(vget_low_u16(vmax0), vget_high_u16(vmax0));
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vst1_lane_u16(output, vmax_lo, 0);
#endif
}
| 2,148
| 28.847222
| 77
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__neon_x32(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint16x8_t vmax0 = vdupq_n_u16(0);
uint16x8_t vmax1 = vdupq_n_u16(0);
uint16x8_t vmax2 = vdupq_n_u16(0);
uint16x8_t vmax3 = vdupq_n_u16(0);
for (; batch >= 32 * sizeof(int16_t); batch -= 32 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vi3 = vld1q_s16(input); input += 8;
const uint16x8_t vabs0 = vreinterpretq_u16_s16(vabsq_s16(vi0));
const uint16x8_t vabs1 = vreinterpretq_u16_s16(vabsq_s16(vi1));
const uint16x8_t vabs2 = vreinterpretq_u16_s16(vabsq_s16(vi2));
const uint16x8_t vabs3 = vreinterpretq_u16_s16(vabsq_s16(vi3));
vmax0 = vmaxq_u16(vmax0, vabs0);
vmax1 = vmaxq_u16(vmax1, vabs1);
vmax2 = vmaxq_u16(vmax2, vabs2);
vmax3 = vmaxq_u16(vmax3, vabs3);
}
vmax0 = vmaxq_u16(vmax0, vmax1);
vmax2 = vmaxq_u16(vmax2, vmax3);
vmax0 = vmaxq_u16(vmax0, vmax2);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
}
if (batch != 0) {
do {
const int16x8_t vi = vld1q_dup_s16(input); input += 1;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
#if XNN_ARCH_ARM64
*output = vmaxvq_u16(vmax0);
#else
uint16x4_t vmax_lo = vmax_u16(vget_low_u16(vmax0), vget_high_u16(vmax0));
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vst1_lane_u16(output, vmax_lo, 0);
#endif
}
| 2,381
| 29.935065
| 77
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-neon-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__neon_x8(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint16x8_t vmax0 = vdupq_n_u16(0);
for (; batch >= 8 * sizeof(int16_t); batch -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
}
if (batch != 0) {
do {
const int16x8_t vi = vld1q_dup_s16(input); input += 1;
const uint16x8_t vabs = vreinterpretq_u16_s16(vabsq_s16(vi));
vmax0 = vmaxq_u16(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
#if XNN_ARCH_ARM64
*output = vmaxvq_u16(vmax0);
#else
uint16x4_t vmax_lo = vmax_u16(vget_low_u16(vmax0), vget_high_u16(vmax0));
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vmax_lo = vpmax_u16(vmax_lo, vmax_lo);
vst1_lane_u16(output, vmax_lo, 0);
#endif
}
| 1,440
| 25.685185
| 77
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__scalar_x1(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint32_t vmax0 = 0;
do {
const int32_t vi = (int32_t) *input++;
const uint32_t vabs = math_abs_s32(vi);
vmax0 = math_max_u32(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
*output = (uint16_t) vmax0;
}
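
// ---------------------------------------------------------------------------
// Editor's note: math_abs_s32 and math_max_u32 come from <xnnpack/math.h>.
// Plausible portable forms are sketched below for reference only; the actual
// library definitions may differ.
static inline uint32_t ref_abs_s32(int32_t v) {
  // Negating in unsigned arithmetic is well defined even for INT32_MIN.
  return v >= 0 ? (uint32_t) v : -(uint32_t) v;
}

static inline uint32_t ref_max_u32(uint32_t a, uint32_t b) {
  return a > b ? a : b;
}
// ---------------------------------------------------------------------------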
| 859
| 21.631579
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__scalar_x2(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint32_t vmax0 = 0;
uint32_t vmax1 = 0;
for (; batch >= 2 * sizeof(int16_t); batch -= 2 * sizeof(int16_t)) {
const int32_t vi0 = (int32_t) input[0];
const int32_t vi1 = (int32_t) input[1];
input += 2;
const uint32_t vabs0 = math_abs_s32(vi0);
const uint32_t vabs1 = math_abs_s32(vi1);
vmax0 = math_max_u32(vmax0, vabs0);
vmax1 = math_max_u32(vmax1, vabs1);
}
vmax0 = math_max_u32(vmax0, vmax1);
if (batch != 0) {
assert(batch == sizeof(int16_t));
const int32_t vi = (int32_t) *input;
const uint32_t vabs = math_abs_s32(vi);
vmax0 = math_max_u32(vmax0, vabs);
}
*output = (uint16_t) vmax0;
}
| 1,273
| 23.037736
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-scalar-x3.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__scalar_x3(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint32_t vmax0 = 0;
uint32_t vmax1 = 0;
uint32_t vmax2 = 0;
for (; batch >= 3 * sizeof(int16_t); batch -= 3 * sizeof(int16_t)) {
const int32_t vi0 = (int32_t) input[0];
const int32_t vi1 = (int32_t) input[1];
const int32_t vi2 = (int32_t) input[2];
input += 3;
const uint32_t vabs0 = math_abs_s32(vi0);
const uint32_t vabs1 = math_abs_s32(vi1);
const uint32_t vabs2 = math_abs_s32(vi2);
vmax0 = math_max_u32(vmax0, vabs0);
vmax1 = math_max_u32(vmax1, vabs1);
vmax2 = math_max_u32(vmax2, vabs2);
}
vmax0 = math_max_u32(vmax0, vmax1);
vmax0 = math_max_u32(vmax0, vmax2);
if (batch != 0) {
do {
const int32_t vi = (int32_t) *input++;
const uint32_t vabs = math_abs_s32(vi);
vmax0 = math_max_u32(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
*output = (uint16_t) vmax0;
}
| 1,500
| 24.016667
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-rmaxabs/gen/s16-rmaxabs-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-rmaxabs/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/rmaxabs.h>
void xnn_s16_rmaxabs_ukernel__scalar_x4(
size_t batch,
const int16_t* input,
uint16_t* output)
{
assert(batch != 0);
assert(batch % sizeof(int16_t) == 0);
assert(input != NULL);
assert(output != NULL);
uint32_t vmax0 = 0;
uint32_t vmax1 = 0;
uint32_t vmax2 = 0;
uint32_t vmax3 = 0;
for (; batch >= 4 * sizeof(int16_t); batch -= 4 * sizeof(int16_t)) {
const int32_t vi0 = (int32_t) input[0];
const int32_t vi1 = (int32_t) input[1];
const int32_t vi2 = (int32_t) input[2];
const int32_t vi3 = (int32_t) input[3];
input += 4;
const uint32_t vabs0 = math_abs_s32(vi0);
const uint32_t vabs1 = math_abs_s32(vi1);
const uint32_t vabs2 = math_abs_s32(vi2);
const uint32_t vabs3 = math_abs_s32(vi3);
vmax0 = math_max_u32(vmax0, vabs0);
vmax1 = math_max_u32(vmax1, vabs1);
vmax2 = math_max_u32(vmax2, vabs2);
vmax3 = math_max_u32(vmax3, vabs3);
}
vmax0 = math_max_u32(vmax0, vmax1);
vmax2 = math_max_u32(vmax2, vmax3);
vmax0 = math_max_u32(vmax0, vmax2);
if (batch != 0) {
do {
const int32_t vi = (int32_t) *input++;
const uint32_t vabs = math_abs_s32(vi);
vmax0 = math_max_u32(vmax0, vabs);
batch -= sizeof(int16_t);
} while (batch != 0);
}
*output = (uint16_t) vmax0;
}
| 1,690
| 25.015385
| 72
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-window/gen/s16-window-neon-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__neon_x16(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift); // negative to shift right.
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(int16_t); c -= 16 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
vacc0_lo = vshlq_s32(vacc0_lo, vshift);
vacc0_hi = vshlq_s32(vacc0_hi, vshift);
vacc1_lo = vshlq_s32(vacc1_lo, vshift);
vacc1_hi = vshlq_s32(vacc1_hi, vshift);
const int16x8_t vout0 = vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi));
const int16x8_t vout1 = vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi));
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc_lo = vshlq_s32(vacc_lo, vshift);
vacc_hi = vshlq_s32(vacc_hi, vshift);
const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
vacc = vshlq_s32(vacc, vshift);
int16x4_t vout = vqmovn_s32(vacc);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc = vshlq_s32(vacc, vshift);
vout = vqmovn_s32(vacc);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
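
// ---------------------------------------------------------------------------
// Editor's note: per element, the window kernels above compute
// saturate_s16((input * weight) >> shift), applying the same weight vector to
// every row. A scalar model of one lane (the helper name is hypothetical;
// note that vshlq_s32 with a negative count truncates rather than rounds,
// which the plain >> below mirrors on implementations with arithmetic shifts):
static inline int16_t s16_window_ref(int16_t x, int16_t w, uint32_t shift) {
  const int32_t acc = ((int32_t) x * (int32_t) w) >> shift;
  if (acc > INT16_MAX) return INT16_MAX;  // vqmovn_s32 saturation
  if (acc < INT16_MIN) return INT16_MIN;
  return (int16_t) acc;
}
// ---------------------------------------------------------------------------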
| 3,436
| 33.029703
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-window/gen/s16-window-neon-x24.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__neon_x24(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift); // negative to shift right.
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 24 * sizeof(int16_t); c -= 24 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
int32x4_t vacc2_lo = vmull_s16(vget_low_s16(vi2), vget_low_s16(vw2));
int32x4_t vacc2_hi = vmull_s16(vget_high_s16(vi2), vget_high_s16(vw2));
vacc0_lo = vshlq_s32(vacc0_lo, vshift);
vacc0_hi = vshlq_s32(vacc0_hi, vshift);
vacc1_lo = vshlq_s32(vacc1_lo, vshift);
vacc1_hi = vshlq_s32(vacc1_hi, vshift);
vacc2_lo = vshlq_s32(vacc2_lo, vshift);
vacc2_hi = vshlq_s32(vacc2_hi, vshift);
const int16x8_t vout0 = vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi));
const int16x8_t vout1 = vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi));
const int16x8_t vout2 = vcombine_s16(vqmovn_s32(vacc2_lo), vqmovn_s32(vacc2_hi));
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc_lo = vshlq_s32(vacc_lo, vshift);
vacc_hi = vshlq_s32(vacc_hi, vshift);
const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
vacc = vshlq_s32(vacc, vshift);
int16x4_t vout = vqmovn_s32(vacc);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc = vshlq_s32(vacc, vshift);
vout = vqmovn_s32(vacc);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
| 3,923
| 35
| 94
|
c
|
XNNPACK
|
XNNPACK-master/src/s16-window/gen/s16-window-neon-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__neon_x32(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift); // negative to shift right.
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 32 * sizeof(int16_t); c -= 32 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vi3 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
const int16x8_t vw3 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
int32x4_t vacc2_lo = vmull_s16(vget_low_s16(vi2), vget_low_s16(vw2));
int32x4_t vacc2_hi = vmull_s16(vget_high_s16(vi2), vget_high_s16(vw2));
int32x4_t vacc3_lo = vmull_s16(vget_low_s16(vi3), vget_low_s16(vw3));
int32x4_t vacc3_hi = vmull_s16(vget_high_s16(vi3), vget_high_s16(vw3));
vacc0_lo = vshlq_s32(vacc0_lo, vshift);
vacc0_hi = vshlq_s32(vacc0_hi, vshift);
vacc1_lo = vshlq_s32(vacc1_lo, vshift);
vacc1_hi = vshlq_s32(vacc1_hi, vshift);
vacc2_lo = vshlq_s32(vacc2_lo, vshift);
vacc2_hi = vshlq_s32(vacc2_hi, vshift);
vacc3_lo = vshlq_s32(vacc3_lo, vshift);
vacc3_hi = vshlq_s32(vacc3_hi, vshift);
const int16x8_t vout0 = vcombine_s16(vqmovn_s32(vacc0_lo), vqmovn_s32(vacc0_hi));
const int16x8_t vout1 = vcombine_s16(vqmovn_s32(vacc1_lo), vqmovn_s32(vacc1_hi));
const int16x8_t vout2 = vcombine_s16(vqmovn_s32(vacc2_lo), vqmovn_s32(vacc2_hi));
const int16x8_t vout3 = vcombine_s16(vqmovn_s32(vacc3_lo), vqmovn_s32(vacc3_hi));
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
vst1q_s16(output, vout3); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc_lo = vshlq_s32(vacc_lo, vshift);
vacc_hi = vshlq_s32(vacc_hi, vshift);
const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
vacc = vshlq_s32(vacc, vshift);
int16x4_t vout = vqmovn_s32(vacc);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc = vshlq_s32(vacc, vshift);
vout = vqmovn_s32(vacc);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-neon-x8.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__neon_x8(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
const int32x4_t vshift = vdupq_n_s32(-(int32_t)shift); // negative to shift right.
do {
const int16_t* w = weights;
size_t c = channels;
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc_lo = vshlq_s32(vacc_lo, vshift);
vacc_hi = vshlq_s32(vacc_hi, vshift);
const int16x8_t vout = vcombine_s16(vqmovn_s32(vacc_lo), vqmovn_s32(vacc_hi));
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
vacc = vshlq_s32(vacc, vshift);
int16x4_t vout = vqmovn_s32(vacc);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vacc = vshlq_s32(vacc, vshift);
vout = vqmovn_s32(vacc);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-scalar-x1.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__scalar_x1(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift)
{
assert(rows > 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
do {
size_t c = channels;
const int16_t* w = weights;
do {
const int32_t vi = (int32_t) *input++;
const int32_t vw = (int32_t) *w++;
int32_t vout = vi * vw;
vout = math_asr_s32(vout, shift);
vout = math_max_s32(vout, INT16_MIN);
vout = math_min_s32(vout, INT16_MAX);
*output++ = (int16_t) vout;
c -= sizeof(int16_t);
} while (c != 0);
} while (--rows != 0);
}
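A hypothetical driver for the scalar x1 kernel above, assuming the ukernel declaration comes from <xnnpack/window.h> as in the sources. Buffer contents and the shift value are made up for illustration. Note that channels is counted in bytes (the kernel decrements it by sizeof(int16_t) per element) and the weights vector is reused for every row:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include <xnnpack/window.h>  // assumed to declare the ukernel, as the sources include it

int main(void) {
  // Made-up data: 2 rows x 4 channels of input, 4 window coefficients.
  const int16_t input[2 * 4] = {100, -200, 300, -400, 500, -600, 700, -800};
  const int16_t weights[4] = {16384, 8192, 4096, 2048};
  int16_t output[2 * 4];
  xnn_s16_window_ukernel__scalar_x1(
      /*rows=*/2, /*channels=*/4 * sizeof(int16_t),
      input, weights, output, /*shift=*/15);
  for (size_t i = 0; i < 8; i++) {
    printf("%d ", (int) output[i]);  // expected: 50 -50 37 -25 250 -150 87 -50
  }
  printf("\n");
  return 0;
}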
// File: XNNPACK-master/src/s16-window/gen/s16-window-scalar-x2.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__scalar_x2(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift)
{
assert(rows > 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
do {
size_t c = channels;
const int16_t* w = weights;
for (; c >= 2 * sizeof(int16_t); c -= 2 * sizeof(int16_t)) {
const int16_t vi0 = input[0];
const int16_t vi1 = input[1];
input += 2;
const int16_t w0 = w[0];
const int16_t w1 = w[1];
w += 2;
int32_t vout0 = (int32_t) vi0 * (int32_t) w0;
int32_t vout1 = (int32_t) vi1 * (int32_t) w1;
vout0 = math_asr_s32(vout0, shift);
vout1 = math_asr_s32(vout1, shift);
vout0 = math_max_s32(vout0, INT16_MIN);
vout1 = math_max_s32(vout1, INT16_MIN);
vout0 = math_min_s32(vout0, INT16_MAX);
vout1 = math_min_s32(vout1, INT16_MAX);
output[0] = (int16_t) vout0;
output[1] = (int16_t) vout1;
output += 2;
}
if XNN_UNLIKELY(c != 0) {
assert(c == sizeof(int16_t));
const int32_t vi = (int32_t) *input++;
const int32_t vw = (int32_t) *w;
int32_t vout = vi * vw;
vout = math_asr_s32(vout, shift);
vout = math_max_s32(vout, INT16_MIN);
vout = math_min_s32(vout, INT16_MAX);
*output++ = (int16_t) vout;
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-scalar-x3.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__scalar_x3(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift)
{
assert(rows > 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
do {
size_t c = channels;
const int16_t* w = weights;
for (; c >= 3 * sizeof(int16_t); c -= 3 * sizeof(int16_t)) {
const int16_t vi0 = input[0];
const int16_t vi1 = input[1];
const int16_t vi2 = input[2];
input += 3;
const int16_t w0 = w[0];
const int16_t w1 = w[1];
const int16_t w2 = w[2];
w += 3;
int32_t vout0 = (int32_t) vi0 * (int32_t) w0;
int32_t vout1 = (int32_t) vi1 * (int32_t) w1;
int32_t vout2 = (int32_t) vi2 * (int32_t) w2;
vout0 = math_asr_s32(vout0, shift);
vout1 = math_asr_s32(vout1, shift);
vout2 = math_asr_s32(vout2, shift);
vout0 = math_max_s32(vout0, INT16_MIN);
vout1 = math_max_s32(vout1, INT16_MIN);
vout2 = math_max_s32(vout2, INT16_MIN);
vout0 = math_min_s32(vout0, INT16_MAX);
vout1 = math_min_s32(vout1, INT16_MAX);
vout2 = math_min_s32(vout2, INT16_MAX);
output[0] = (int16_t) vout0;
output[1] = (int16_t) vout1;
output[2] = (int16_t) vout2;
output += 3;
}
if XNN_UNLIKELY(c != 0) {
do {
const int32_t vi = (int32_t) *input++;
const int32_t vw = (int32_t) *w++;
int32_t vout = vi * vw;
vout = math_asr_s32(vout, shift);
vout = math_max_s32(vout, INT16_MIN);
vout = math_min_s32(vout, INT16_MAX);
*output++ = (int16_t) vout;
c -= sizeof(int16_t);
} while (c != 0);
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-scalar-x4.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_ukernel__scalar_x4(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift)
{
assert(rows > 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift < 32);
do {
size_t c = channels;
const int16_t* w = weights;
for (; c >= 4 * sizeof(int16_t); c -= 4 * sizeof(int16_t)) {
const int16_t vi0 = input[0];
const int16_t vi1 = input[1];
const int16_t vi2 = input[2];
const int16_t vi3 = input[3];
input += 4;
const int16_t w0 = w[0];
const int16_t w1 = w[1];
const int16_t w2 = w[2];
const int16_t w3 = w[3];
w += 4;
int32_t vout0 = (int32_t) vi0 * (int32_t) w0;
int32_t vout1 = (int32_t) vi1 * (int32_t) w1;
int32_t vout2 = (int32_t) vi2 * (int32_t) w2;
int32_t vout3 = (int32_t) vi3 * (int32_t) w3;
vout0 = math_asr_s32(vout0, shift);
vout1 = math_asr_s32(vout1, shift);
vout2 = math_asr_s32(vout2, shift);
vout3 = math_asr_s32(vout3, shift);
vout0 = math_max_s32(vout0, INT16_MIN);
vout1 = math_max_s32(vout1, INT16_MIN);
vout2 = math_max_s32(vout2, INT16_MIN);
vout3 = math_max_s32(vout3, INT16_MIN);
vout0 = math_min_s32(vout0, INT16_MAX);
vout1 = math_min_s32(vout1, INT16_MAX);
vout2 = math_min_s32(vout2, INT16_MAX);
vout3 = math_min_s32(vout3, INT16_MAX);
output[0] = (int16_t) vout0;
output[1] = (int16_t) vout1;
output[2] = (int16_t) vout2;
output[3] = (int16_t) vout3;
output += 4;
}
if XNN_UNLIKELY(c != 0) {
do {
const int32_t vi = (int32_t) *input++;
const int32_t vw = (int32_t) *w++;
int32_t vout = vi * vw;
vout = math_asr_s32(vout, shift);
vout = math_max_s32(vout, INT16_MIN);
vout = math_min_s32(vout, INT16_MAX);
*output++ = (int16_t) vout;
c -= sizeof(int16_t);
} while (c != 0);
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift12-neon-x16.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift12_ukernel__neon_x16(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 12);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(int16_t); c -= 16 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
const int16x4_t vshift0_lo = vqshrn_n_s32(vacc0_lo, 12);
const int16x4_t vshift0_hi = vqshrn_n_s32(vacc0_hi, 12);
const int16x4_t vshift1_lo = vqshrn_n_s32(vacc1_lo, 12);
const int16x4_t vshift1_hi = vqshrn_n_s32(vacc1_hi, 12);
const int16x8_t vout0 = vcombine_s16(vshift0_lo, vshift0_hi);
const int16x8_t vout1 = vcombine_s16(vshift1_lo, vshift1_hi);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
const int16x4_t vshift_lo = vqshrn_n_s32(vacc_lo, 12);
const int16x4_t vshift_hi = vqshrn_n_s32(vacc_hi, 12);
const int16x8_t vout = vcombine_s16(vshift_lo, vshift_hi);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int16x4_t vout = vqshrn_n_s32(vacc, 12);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vout = vqshrn_n_s32(vacc, 12);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
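The shift12 variant folds the shift into the narrowing step: vqshrn_n_s32 arithmetic-shifts each 32-bit product right by the compile-time immediate 12, then saturates while narrowing to 16 bits, which matches the generic kernel's vshlq_s32 by a negative amount followed by vqmovn_s32. A scalar model of one lane (an illustrative sketch; the helper name is ours):

#include <stdint.h>

// Illustrative scalar model of vqshrn.s32 #12: arithmetic shift right, then saturating narrow.
static inline int16_t sqshrn_n_s32_12(int32_t acc) {
  acc = acc >> 12;                          // arithmetic shift right by the immediate
  if (acc > INT16_MAX) return INT16_MAX;    // saturate to the int16 range while narrowing
  if (acc < INT16_MIN) return INT16_MIN;
  return (int16_t) acc;
}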
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift12-neon-x24.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift12_ukernel__neon_x24(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 12);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 24 * sizeof(int16_t); c -= 24 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
int32x4_t vacc2_lo = vmull_s16(vget_low_s16(vi2), vget_low_s16(vw2));
int32x4_t vacc2_hi = vmull_s16(vget_high_s16(vi2), vget_high_s16(vw2));
const int16x4_t vshift0_lo = vqshrn_n_s32(vacc0_lo, 12);
const int16x4_t vshift0_hi = vqshrn_n_s32(vacc0_hi, 12);
const int16x4_t vshift1_lo = vqshrn_n_s32(vacc1_lo, 12);
const int16x4_t vshift1_hi = vqshrn_n_s32(vacc1_hi, 12);
const int16x4_t vshift2_lo = vqshrn_n_s32(vacc2_lo, 12);
const int16x4_t vshift2_hi = vqshrn_n_s32(vacc2_hi, 12);
const int16x8_t vout0 = vcombine_s16(vshift0_lo, vshift0_hi);
const int16x8_t vout1 = vcombine_s16(vshift1_lo, vshift1_hi);
const int16x8_t vout2 = vcombine_s16(vshift2_lo, vshift2_hi);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
const int16x4_t vshift_lo = vqshrn_n_s32(vacc_lo, 12);
const int16x4_t vshift_hi = vqshrn_n_s32(vacc_hi, 12);
const int16x8_t vout = vcombine_s16(vshift_lo, vshift_hi);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int16x4_t vout = vqshrn_n_s32(vacc, 12);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vout = vqshrn_n_s32(vacc, 12);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift12-neon-x32.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift12_ukernel__neon_x32(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 12);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 32 * sizeof(int16_t); c -= 32 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vi3 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
const int16x8_t vw3 = vld1q_s16(w); w += 8;
int32x4_t vacc0_lo = vmull_s16(vget_low_s16(vi0), vget_low_s16(vw0));
int32x4_t vacc0_hi = vmull_s16(vget_high_s16(vi0), vget_high_s16(vw0));
int32x4_t vacc1_lo = vmull_s16(vget_low_s16(vi1), vget_low_s16(vw1));
int32x4_t vacc1_hi = vmull_s16(vget_high_s16(vi1), vget_high_s16(vw1));
int32x4_t vacc2_lo = vmull_s16(vget_low_s16(vi2), vget_low_s16(vw2));
int32x4_t vacc2_hi = vmull_s16(vget_high_s16(vi2), vget_high_s16(vw2));
int32x4_t vacc3_lo = vmull_s16(vget_low_s16(vi3), vget_low_s16(vw3));
int32x4_t vacc3_hi = vmull_s16(vget_high_s16(vi3), vget_high_s16(vw3));
const int16x4_t vshift0_lo = vqshrn_n_s32(vacc0_lo, 12);
const int16x4_t vshift0_hi = vqshrn_n_s32(vacc0_hi, 12);
const int16x4_t vshift1_lo = vqshrn_n_s32(vacc1_lo, 12);
const int16x4_t vshift1_hi = vqshrn_n_s32(vacc1_hi, 12);
const int16x4_t vshift2_lo = vqshrn_n_s32(vacc2_lo, 12);
const int16x4_t vshift2_hi = vqshrn_n_s32(vacc2_hi, 12);
const int16x4_t vshift3_lo = vqshrn_n_s32(vacc3_lo, 12);
const int16x4_t vshift3_hi = vqshrn_n_s32(vacc3_hi, 12);
const int16x8_t vout0 = vcombine_s16(vshift0_lo, vshift0_hi);
const int16x8_t vout1 = vcombine_s16(vshift1_lo, vshift1_hi);
const int16x8_t vout2 = vcombine_s16(vshift2_lo, vshift2_hi);
const int16x8_t vout3 = vcombine_s16(vshift3_lo, vshift3_hi);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
vst1q_s16(output, vout3); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
const int16x4_t vshift_lo = vqshrn_n_s32(vacc_lo, 12);
const int16x4_t vshift_hi = vqshrn_n_s32(vacc_hi, 12);
const int16x8_t vout = vcombine_s16(vshift_lo, vshift_hi);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int16x4_t vout = vqshrn_n_s32(vacc, 12);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vout = vqshrn_n_s32(vacc, 12);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift12-neon-x8.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift12_ukernel__neon_x8(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 12);
do {
const int16_t* w = weights;
size_t c = channels;
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
int32x4_t vacc_lo = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int32x4_t vacc_hi = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
const int16x4_t vshift_lo = vqshrn_n_s32(vacc_lo, 12);
const int16x4_t vshift_hi = vqshrn_n_s32(vacc_hi, 12);
const int16x8_t vout = vcombine_s16(vshift_lo, vshift_hi);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int32x4_t vacc = vmull_s16(vget_low_s16(vi), vget_low_s16(vw));
int16x4_t vout = vqshrn_n_s32(vacc, 12);
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vacc = vmull_s16(vget_high_s16(vi), vget_high_s16(vw));
vout = vqshrn_n_s32(vacc, 12);
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift15-neon-x16.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift15_ukernel__neon_x16(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 15);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 16 * sizeof(int16_t); c -= 16 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vout0 = vqdmulhq_s16(vi0, vw0);
const int16x8_t vout1 = vqdmulhq_s16(vi1, vw1);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
const int16x8_t vout = vqdmulhq_s16(vi, vw);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int16x4_t vout = vqdmulh_s16(vget_low_s16(vi), vget_low_s16(vw));
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vout = vqdmulh_s16(vget_high_s16(vi), vget_high_s16(vw));
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
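The shift15 variant replaces the whole widen-shift-narrow sequence with a single vqdmulhq_s16. For 16-bit operands, vqdmulh returns the high half of the saturated doubled product, sat32(2*a*b) >> 16, which equals (a*b) >> 15 for every input pair except a == b == INT16_MIN, where the doubled product saturates and the result is INT16_MAX. A scalar model of one lane (an illustrative sketch; the helper name is ours):

#include <stdint.h>

// Illustrative scalar model of the NEON vqdmulh.s16 lane operation used when shift == 15.
static inline int16_t sqdmulh_s16(int16_t a, int16_t b) {
  const int64_t product = 2 * (int64_t) a * (int64_t) b;  // doubling multiply, widened to avoid overflow
  if (product > INT32_MAX) {  // only reachable when a == b == INT16_MIN
    return INT16_MAX;         // the doubled product saturates at 32 bits
  }
  return (int16_t) (product >> 16);  // high half of the 32-bit result
}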
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift15-neon-x24.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift15_ukernel__neon_x24(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 15);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 24 * sizeof(int16_t); c -= 24 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
const int16x8_t vout0 = vqdmulhq_s16(vi0, vw0);
const int16x8_t vout1 = vqdmulhq_s16(vi1, vw1);
const int16x8_t vout2 = vqdmulhq_s16(vi2, vw2);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
const int16x8_t vout = vqdmulhq_s16(vi, vw);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int16x4_t vout = vqdmulh_s16(vget_low_s16(vi), vget_low_s16(vw));
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vout = vqdmulh_s16(vget_high_s16(vi), vget_high_s16(vw));
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift15-neon-x32.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift15_ukernel__neon_x32(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 15);
do {
const int16_t* w = weights;
size_t c = channels;
for (; c >= 32 * sizeof(int16_t); c -= 32 * sizeof(int16_t)) {
const int16x8_t vi0 = vld1q_s16(input); input += 8;
const int16x8_t vi1 = vld1q_s16(input); input += 8;
const int16x8_t vi2 = vld1q_s16(input); input += 8;
const int16x8_t vi3 = vld1q_s16(input); input += 8;
const int16x8_t vw0 = vld1q_s16(w); w += 8;
const int16x8_t vw1 = vld1q_s16(w); w += 8;
const int16x8_t vw2 = vld1q_s16(w); w += 8;
const int16x8_t vw3 = vld1q_s16(w); w += 8;
const int16x8_t vout0 = vqdmulhq_s16(vi0, vw0);
const int16x8_t vout1 = vqdmulhq_s16(vi1, vw1);
const int16x8_t vout2 = vqdmulhq_s16(vi2, vw2);
const int16x8_t vout3 = vqdmulhq_s16(vi3, vw3);
vst1q_s16(output, vout0); output += 8;
vst1q_s16(output, vout1); output += 8;
vst1q_s16(output, vout2); output += 8;
vst1q_s16(output, vout3); output += 8;
}
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
const int16x8_t vout = vqdmulhq_s16(vi, vw);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int16x4_t vout = vqdmulh_s16(vget_low_s16(vi), vget_low_s16(vw));
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vout = vqdmulh_s16(vget_high_s16(vi), vget_high_s16(vw));
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s16-window/gen/s16-window-shift15-neon-x8.c
// Auto-generated file. Do not edit!
// Template: src/s16-window/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/window.h>
void xnn_s16_window_shift15_ukernel__neon_x8(
size_t rows,
size_t channels,
const int16_t* input,
const int16_t* weights,
int16_t* output,
uint32_t shift) XNN_OOB_READS
{
assert(rows != 0);
assert(channels != 0);
assert(input != NULL);
assert(weights != NULL);
assert(output != NULL);
assert(shift == 15);
do {
const int16_t* w = weights;
size_t c = channels;
// Remainder of full vectors
for (; c >= 8 * sizeof(int16_t); c -= 8 * sizeof(int16_t)) {
const int16x8_t vi = vld1q_s16(input); input += 8;
const int16x8_t vw = vld1q_s16(w); w += 8;
const int16x8_t vout = vqdmulhq_s16(vi, vw);
vst1q_s16(output, vout); output += 8;
}
assert(c % 2 == 0);
// Remainder of 1 to 7 channels
if XNN_UNLIKELY(c != 0) {
const int16x8_t vi = vld1q_s16(input); input = (const int16_t*) ((uintptr_t) input + c);
const int16x8_t vw = vld1q_s16(w);
int16x4_t vout = vqdmulh_s16(vget_low_s16(vi), vget_low_s16(vw));
if (c & (4 * sizeof(int16_t))) {
vst1_s16(output, vout); output += 4;
vout = vqdmulh_s16(vget_high_s16(vi), vget_high_s16(vw));
}
if (c & (2 * sizeof(int16_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_s16(vout), 0); output += 2;
vout = vext_s16(vout, vout, 2);
}
if (c & (1 * sizeof(int16_t))) {
vst1_lane_s16(output, vout, 0); output += 1;
}
}
} while (--rows != 0);
}
// File: XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-scalar-c1.c
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
void xnn_s8_ibilinear_ukernel__scalar_c1(
size_t output_pixels,
size_t channels,
const int8_t** restrict input,
size_t input_offset,
const int16_t* restrict weights,
int8_t* restrict output,
size_t output_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
do {
const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
const int32_t valphah = (int32_t) (uint32_t) (uint16_t) weights[0];
const int32_t valphav = (int32_t) (uint32_t) (uint16_t) weights[1];
weights += 2;
const int32_t vrounding = INT32_C(0x00200000);
size_t c = channels;
do {
const int32_t vtl = (int32_t) *i0++;
const int32_t vtr = (int32_t) *i1++;
const int32_t vbl = (int32_t) *i2++;
const int32_t vbr = (int32_t) *i3++;
const int32_t vtd = vtr - vtl;
const int32_t vbd = vbr - vbl;
const int32_t vt = (int32_t) ((uint32_t) vtl << 11) + vtd * valphah;
const int32_t vb = (int32_t) ((uint32_t) vbl << 11) + vbd * valphah;
const int32_t vd = vb - vt;
const int32_t vacc = (int32_t) ((uint32_t) vt << 11) + vd * valphav;
const int32_t vo = math_asr_s32(vacc + vrounding, 22);
*output++ = vo;
c -= sizeof(int8_t);
} while (c != 0);
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
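The interpolation above runs in Q11 fixed point: valphah and valphav are horizontal and vertical weights in [0, 2048], each lerp appends 11 fractional bits, and the final accumulator carries 22; adding vrounding (0x00200000 == 2^21) before the arithmetic shift right by 22 rounds to nearest. A sketch checking one pixel against a floating-point reference, with made-up corner samples and weights:

#include <assert.h>
#include <math.h>
#include <stdint.h>

int main(void) {
  // Made-up corner samples and Q11 weights: alphah = 0.25, alphav = 0.75.
  const int32_t vtl = -50, vtr = 30, vbl = 90, vbr = -120;
  const int32_t valphah = 512, valphav = 1536;
  // Fixed-point path, mirroring the kernel above.
  const int32_t vt = (int32_t) ((uint32_t) vtl << 11) + (vtr - vtl) * valphah;
  const int32_t vb = (int32_t) ((uint32_t) vbl << 11) + (vbr - vbl) * valphah;
  const int32_t vd = vb - vt;
  const int32_t vacc = (int32_t) ((uint32_t) vt << 11) + vd * valphav;
  const int32_t vo = (vacc + INT32_C(0x00200000)) >> 22;
  // Floating-point reference for the same bilinear blend.
  const double h = valphah / 2048.0, v = valphav / 2048.0;
  const double expected = (1.0 - v) * ((1.0 - h) * vtl + h * vtr)
                        + v * ((1.0 - h) * vbl + h * vbr);
  assert(fabs((double) vo - expected) <= 0.5);  // exact up to round-to-nearest
  return 0;
}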
// File: XNNPACK-master/src/s8-ibilinear/gen/s8-ibilinear-scalar-c2.c
// Auto-generated file. Do not edit!
// Template: src/s8-ibilinear/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/ibilinear.h>
#include <xnnpack/math.h>
void xnn_s8_ibilinear_ukernel__scalar_c2(
size_t output_pixels,
size_t channels,
const int8_t** restrict input,
size_t input_offset,
const int16_t* restrict weights,
int8_t* restrict output,
size_t output_increment)
{
assert(output_pixels != 0);
assert(channels != 0);
do {
const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
input += 4;
const int32_t valphah = (int32_t) (uint32_t) (uint16_t) weights[0];
const int32_t valphav = (int32_t) (uint32_t) (uint16_t) weights[1];
weights += 2;
const int32_t vrounding = INT32_C(0x00200000);
size_t c = channels;
for (; c >= 2 * sizeof(int8_t); c -= 2 * sizeof(int8_t)) {
const int32_t vtl0 = (int32_t) i0[0];
const int32_t vtr0 = (int32_t) i1[0];
const int32_t vbl0 = (int32_t) i2[0];
const int32_t vbr0 = (int32_t) i3[0];
const int32_t vtl1 = (int32_t) i0[1];
const int32_t vtr1 = (int32_t) i1[1];
const int32_t vbl1 = (int32_t) i2[1];
const int32_t vbr1 = (int32_t) i3[1];
i0 += 2;
i1 += 2;
i2 += 2;
i3 += 2;
const int32_t vtd0 = vtr0 - vtl0;
const int32_t vbd0 = vbr0 - vbl0;
const int32_t vtd1 = vtr1 - vtl1;
const int32_t vbd1 = vbr1 - vbl1;
const int32_t vt0 = (int32_t) ((uint32_t) vtl0 << 11) + vtd0 * valphah;
const int32_t vb0 = (int32_t) ((uint32_t) vbl0 << 11) + vbd0 * valphah;
const int32_t vt1 = (int32_t) ((uint32_t) vtl1 << 11) + vtd1 * valphah;
const int32_t vb1 = (int32_t) ((uint32_t) vbl1 << 11) + vbd1 * valphah;
const int32_t vd0 = vb0 - vt0;
const int32_t vd1 = vb1 - vt1;
const int32_t vacc0 = (int32_t) ((uint32_t) vt0 << 11) + vd0 * valphav;
const int32_t vacc1 = (int32_t) ((uint32_t) vt1 << 11) + vd1 * valphav;
const int32_t vo0 = math_asr_s32(vacc0 + vrounding, 22);
const int32_t vo1 = math_asr_s32(vacc1 + vrounding, 22);
output[0] = (int8_t) vo0;
output[1] = (int8_t) vo1;
output += 2;
}
for (; c >= sizeof(int8_t); c -= sizeof(int8_t)) {
const int32_t vtl = (int32_t) *i0++;
const int32_t vtr = (int32_t) *i1++;
const int32_t vbl = (int32_t) *i2++;
const int32_t vbr = (int32_t) *i3++;
const int32_t vtd = vtr - vtl;
const int32_t vbd = vbr - vbl;
const int32_t vt = (int32_t) ((uint32_t) vtl << 11) + vtd * valphah;
const int32_t vb = (int32_t) ((uint32_t) vbl << 11) + vbd * valphah;
const int32_t vd = vb - vt;
const int32_t vacc = (int32_t) ((uint32_t) vt << 11) + vd * valphav;
const int32_t vo = math_asr_s32(vacc + vrounding, 22);
*output++ = vo;
}
output = (int8_t*) ((uintptr_t) output + output_increment);
} while (--output_pixels != 0);
}
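A hypothetical driver for the scalar c2 kernel above, assuming the ukernel declaration comes from <xnnpack/ibilinear.h> as in the sources. Each output pixel consumes four pointers from the indirection buffer (top-left, top-right, bottom-left, bottom-right corners) plus two Q11 weights; all values below are made up for illustration:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include <xnnpack/ibilinear.h>  // assumed to declare the ukernel, as the sources include it

int main(void) {
  // Two rows of a 2-channel image; each pair of bytes is one pixel's channels.
  const int8_t row0[] = {10, 20, 30, 40};
  const int8_t row1[] = {50, 60, 70, 80};
  // One output pixel: top-left, top-right, bottom-left, bottom-right corner pointers.
  const int8_t* indirection[4] = {row0, row0 + 2, row1, row1 + 2};
  const int16_t weights[2] = {1024, 1024};  // alphah, alphav: both 0.5 in Q11
  int8_t output[2];
  xnn_s8_ibilinear_ukernel__scalar_c2(
      /*output_pixels=*/1, /*channels=*/2 * sizeof(int8_t),
      indirection, /*input_offset=*/0, weights, output,
      /*output_increment=*/0);
  printf("%d %d\n", output[0], output[1]);  // expected: 40 50
  return 0;
}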