Dataset columns: repo (string, lengths 1-152, nullable) | file (string, lengths 14-221) | code (string, lengths 501-25k) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes)
repo: XNNPACK
file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-xop-ld128.c
code:
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
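// Note: the K loop below consumes 8 bytes per iteration, so kc is rounded up
// to a multiple of 8; the XNN_OOB_READS annotation documents that the padded
// tail may read past the logical end of a row.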
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
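// Rows beyond `mr` alias the previous row's pointer, so the full 4-row
// compute can run unconditionally; the redundant stores are overwritten by
// the valid row's store that follows them.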
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
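// 4c2s4 scheme: each activation load covers 8 K values, zero-extended to
// 16 bits. _mm_maddd_epi16 (XOP) multiplies 16-bit pairs and accumulates
// into 32-bit lanes; between steps the activation vector is rotated by one
// 32-bit lane (_mm_shuffle_epi32) so the next pair of K values lines up
// with the next packed weight vector.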
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
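// fp32 requantization: scale the int32 accumulators in float, clamp against
// the output max (stored as max minus zero point), round back to int32, add
// the output zero point with signed saturation, pack to u8, then apply the
// output min.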
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
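// Each 32-bit lane of vout now holds one row's 4 output bytes
// (lane 0 = row 0 ... lane 3 = row 3), so the stores below extract one
// lane per row.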
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
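// ---------------------------------------------------------------------------
// Illustrative sketch (not XNNPACK code): a scalar reference for the
// arithmetic the qu8 igemm ukernels in this file family implement, assuming
// plain row-major operands. The real kernels additionally use an indirection
// buffer (`a`/`zero`/`a_offset`) and a packed weight layout with interleaved
// bias; all names below are hypothetical.
#include <stdint.h>
#include <stddef.h>
#include <math.h>

static void ref_qu8_gemm_fp32(
    size_t m, size_t n, size_t k,
    const uint8_t* a,      // m x k activations, row-major
    const uint8_t* b,      // k x n weights, row-major
    const int32_t* bias,   // n bias values
    uint8_t b_zero_point,  // kernel (weight) zero point
    float scale,           // fp32 requantization scale
    uint8_t out_zero_point, uint8_t out_min, uint8_t out_max,
    uint8_t* c)            // m x n outputs, row-major
{
  for (size_t i = 0; i < m; i++) {
    for (size_t j = 0; j < n; j++) {
      int32_t acc = bias[j];
      for (size_t p = 0; p < k; p++) {
        acc += (int32_t) a[i * k + p]
             * ((int32_t) b[p * n + j] - (int32_t) b_zero_point);
      }
      // fp32 requantization: scale, round to nearest even (matching
      // _mm_cvtps_epi32 under the default rounding mode), add the output
      // zero point, clamp to [out_min, out_max].
      long r = lrintf((float) acc * scale) + (long) out_zero_point;
      if (r < (long) out_min) r = (long) out_min;
      if (r > (long) out_max) r = (long) out_max;
      c[i * n + j] = (uint8_t) r;
    }
  }
}
// ---------------------------------------------------------------------------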
file_length: 8,411 | avg_line_length: 37.587156 | max_line_length: 108 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-xop-ld64.c
code:
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
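// ld64 variant: each weight vector is loaded 8 bytes at a time and
// zero-extended with _mm_cvtepu8_epi16, where the ld128 variant above loads
// 16 bytes once and splits them with unpacklo/unpackhi against a zero vector.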
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 8,504 | avg_line_length: 37.835616 | max_line_length: 108 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x8c4-minmax-rndnu-neondot.c
code:
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_4x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc3x0123 = vpacc0x0123;
uint32x4_t vpacc3x4567 = vpacc0x4567;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
uint32x2_t vnacc3 = vmov_n_u32(0);
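// The unsigned vdot cannot subtract the kernel zero point before the dot
// product, so each row keeps two accumulators:
//   vpacc = sum(a * b)      vnacc = sum(zero_point_b * a)
// and sum(a * (b - zero_point_b)) = vpacc - vnacc is formed after the K loop.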
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 4x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
// Load a 8x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x8 * 8x8 --> 4x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 4x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
// Load a 4x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x4 * 4x8 --> 4x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
vnacc3 = vpadd_u32(vnacc3, vnacc3);
const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
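// rndnu requantization: arithmetic pre-shift (vshlq_s32; a negative shift
// value shifts right), saturating doubling multiply-high (vqdmulhq_s32) by
// the fixed-point multiplier, then a rounding post-shift (vrshlq_s32).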
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
if (nc >= 8) {
vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 14,264 | avg_line_length: 49.40636 | max_line_length: 130 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-5x8c4-minmax-rndnu-neondot.c
code:
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_5x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 5);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (5 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc3x0123 = vpacc0x0123;
uint32x4_t vpacc3x4567 = vpacc0x4567;
uint32x4_t vpacc4x0123 = vpacc0x0123;
uint32x4_t vpacc4x4567 = vpacc0x4567;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
uint32x2_t vnacc3 = vmov_n_u32(0);
uint32x2_t vnacc4 = vmov_n_u32(0);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
const uint8_t* restrict a4 = a[4];
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
}
a += 5;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 5x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
// Load a 8x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 5x8 * 8x8 --> 5x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 5x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
// Load a 4x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 5x4 * 4x8 --> 5x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
}
p -= 5 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
vnacc3 = vpadd_u32(vnacc3, vnacc3);
const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
vnacc4 = vpadd_u32(vnacc4, vnacc4);
const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
uint8x8_t vout4x01234567 = vqmovun_s16(vacc4x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
uint8x8_t vout4x01234567 = vqmovun_s16(vacc4x01234567);
#endif
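// The odd 5th row cannot pair into a 128-bit register, so vout4x01234567
// stays in a 64-bit register and uses the non-q intrinsics below
// (vmax_u8/vmin_u8, vst1_u8, vext_u8, vst1_lane_*).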
const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
vout4x01234567 = vmax_u8(vout4x01234567, vget_low_u8(voutput_min));
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
vout4x01234567 = vmin_u8(vout4x01234567, vget_low_u8(voutput_max));
if (nc >= 8) {
vst1_u8(c4 + 0, vout4x01234567);
vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c4, vreinterpret_u32_u8(vout4x01234567), 0); c4 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 4);
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c4, vreinterpret_u16_u8(vout4x01234567), 0); c4 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout4x01234567 = vext_u8(vout4x01234567, vout4x01234567, 2);
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c4, vout4x01234567, 0);
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
file_length: 17,024 | avg_line_length: 51.06422 | max_line_length: 130 | extension_type: c

repo: XNNPACK
file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-6x8c4-minmax-rndnu-neondot.c
code:
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_6x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 6);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (6 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr < 4) {
c3 = c2;
}
uint8_t* c4 = (uint8_t*) ((uintptr_t) c3 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 4) {
c4 = c3;
}
uint8_t* c5 = (uint8_t*) ((uintptr_t) c4 + cm_stride);
if XNN_UNPREDICTABLE(mr != 6) {
c5 = c4;
}
const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc3x0123 = vpacc0x0123;
uint32x4_t vpacc3x4567 = vpacc0x4567;
uint32x4_t vpacc4x0123 = vpacc0x0123;
uint32x4_t vpacc4x4567 = vpacc0x4567;
uint32x4_t vpacc5x0123 = vpacc0x0123;
uint32x4_t vpacc5x4567 = vpacc0x4567;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
uint32x2_t vnacc3 = vmov_n_u32(0);
uint32x2_t vnacc4 = vmov_n_u32(0);
uint32x2_t vnacc5 = vmov_n_u32(0);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
const uint8_t* restrict a4 = a[4];
if XNN_UNPREDICTABLE(a4 != zero) {
a4 = (const uint8_t*) ((uintptr_t) a4 + a_offset);
}
const uint8_t* restrict a5 = a[5];
if XNN_UNPREDICTABLE(a5 != zero) {
a5 = (const uint8_t*) ((uintptr_t) a5 + a_offset);
}
a += 6;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 6x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
const uint8x8_t va4x01234567 = vld1_u8(a4); a4 += 8;
const uint8x8_t va5x01234567 = vld1_u8(a5); a5 += 8;
// Load a 8x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 6x8 * 8x8 --> 6x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb4567x0123, va4x01234567, 1);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb4567x4567, va4x01234567, 1);
vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb4567x0123, va5x01234567, 1);
vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb4567x4567, va5x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 6x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
const uint8x8_t va4x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a4, vmov_n_u32(0), 0)); a4 += 4;
const uint8x8_t va5x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a5, vmov_n_u32(0), 0)); a5 += 4;
// Load a 4x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 6x4 * 4x8 --> 6x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vnacc4 = vdot_u32(vnacc4, va_zero_point, va4x01234567);
vpacc4x0123 = vdotq_lane_u32(vpacc4x0123, vb0123x0123, va4x01234567, 0);
vpacc4x4567 = vdotq_lane_u32(vpacc4x4567, vb0123x4567, va4x01234567, 0);
vnacc5 = vdot_u32(vnacc5, va_zero_point, va5x01234567);
vpacc5x0123 = vdotq_lane_u32(vpacc5x0123, vb0123x0123, va5x01234567, 0);
vpacc5x4567 = vdotq_lane_u32(vpacc5x4567, vb0123x4567, va5x01234567, 0);
}
p -= 6 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
vnacc3 = vpadd_u32(vnacc3, vnacc3);
const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
vnacc4 = vpadd_u32(vnacc4, vnacc4);
const uint32x4_t vnacc4x0123 = vcombine_u32(vnacc4, vnacc4);
int32x4_t vacc4x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x0123, vnacc4x0123));
int32x4_t vacc4x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc4x4567, vnacc4x0123));
vnacc5 = vpadd_u32(vnacc5, vnacc5);
const uint32x4_t vnacc5x0123 = vcombine_u32(vnacc5, vnacc5);
int32x4_t vacc5x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x0123, vnacc5x0123));
int32x4_t vacc5x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc5x4567, vnacc5x0123));
const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x16_t vout2x01234567_3x01234567 = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc3x01234567);
uint8x16_t vout4x01234567_5x01234567 = vqmovun_high_s16(vqmovun_s16(vacc4x01234567), vacc5x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc3x01234567));
uint8x16_t vout4x01234567_5x01234567 = vcombine_u8(vqmovun_s16(vacc4x01234567), vqmovun_s16(vacc5x01234567));
#endif
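    // Clamp the 8-bit results to the requested [output_min, output_max] range.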
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567_3x01234567 = vmaxq_u8(vout2x01234567_3x01234567, voutput_min);
vout4x01234567_5x01234567 = vmaxq_u8(vout4x01234567_5x01234567, voutput_min);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567_3x01234567 = vminq_u8(vout2x01234567_3x01234567, voutput_max);
vout4x01234567_5x01234567 = vminq_u8(vout4x01234567_5x01234567, voutput_max);
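    // With at least 8 output columns remaining, store full 8-byte rows and
    // advance the output pointers to the next column block.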
if (nc >= 8) {
vst1_u8(c5 + 0, vget_high_u8(vout4x01234567_5x01234567));
vst1_u8(c4 + 0, vget_low_u8(vout4x01234567_5x01234567));
vst1_u8(c3 + 0, vget_high_u8(vout2x01234567_3x01234567));
vst1_u8(c2 + 0, vget_low_u8(vout2x01234567_3x01234567));
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c5 = (uint8_t*) ((uintptr_t) c5 + cn_stride);
c4 = (uint8_t*) ((uintptr_t) c4 + cn_stride);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
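      // Tail path: emit 4-, 2-, and 1-byte pieces, rotating the vectors with
      // vextq_u8 so the next piece of each row moves into the stored lanes.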
if (nc & 4) {
vst1q_lane_u32((void*) c5, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 2); c5 += 4;
vst1q_lane_u32((void*) c4, vreinterpretq_u32_u8(vout4x01234567_5x01234567), 0); c4 += 4;
vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c5, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 4); c5 += 2;
vst1q_lane_u16((void*) c4, vreinterpretq_u16_u8(vout4x01234567_5x01234567), 0); c4 += 2;
vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout4x01234567_5x01234567 = vextq_u8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c5, vout4x01234567_5x01234567, 8);
vst1q_lane_u8(c4, vout4x01234567_5x01234567, 0);
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,696
| 52.964384
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-avx-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
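  // mul16 strategy: bytes are zero-extended to 16 bits, and each 32-bit
  // product is assembled from _mm_mullo_epi16/_mm_mulhi_epu16 halves because
  // the multipliers are split into low and high 16-bit words.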
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
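  // Up to 15 bytes remain: process them in padded 8-byte passes (XNN_OOB_READS
  // allows the final loads to read past the end of the inputs) and store the
  // leftover 4/2/1 bytes individually.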
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,195
| 47.621622
| 104
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-avx-mul16-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__avx_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
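  // At most 7 bytes remain here, so a single padded 8-byte pass suffices
  // (XNN_OOB_READS permits the out-of-bounds load).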
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 5,560
| 43.846774
| 104
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-avx-mul32-ld32-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
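  // mul32 strategy: each 4-byte group is widened to 32-bit lanes with
  // _mm_cvtepu8_epi32 so the multiply can be done directly with
  // _mm_mullo_epi32 (SSE4.1), avoiding the 16-bit lo/hi product assembly.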
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,919
| 45.984127
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-avx-mul32-ld32-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__avx_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,652
| 42.896226
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-neon-ld128-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
const uint8x16_t va_zero_point = vld1q_dup_u8(¶ms->neon.a_zero_point);
const uint8x16_t vb_zero_point = vld1q_dup_u8(¶ms->neon.b_zero_point);
#else
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->neon.b_zero_point);
#endif
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->neon.output_max);
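  // ld128 variant: operands are loaded 16 bytes at a time; on AArch64 the
  // upper halves are widened with vsubl_high_u8, avoiding extra vget_high_u8
  // moves.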
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
const uint8x16_t vb0123456789ABCDEF = vld1q_u8(input_b); input_b += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vget_low_u8(vb_zero_point)));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(vb0123456789ABCDEF, vb_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(vb0123456789ABCDEF), vb_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(vb0123456789ABCDEF), vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vget_low_u8(vb_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 6,518
| 47.649254
| 129
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-neon-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->neon.output_max);
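  // ld64 variant: operands are loaded in 8-byte halves, and the widening
  // subtract of the zero point yields signed 16-bit inputs for the
  // multiply-accumulate.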
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,455
| 44.848739
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-neon-ld64-x32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__neon_ld64_x32(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->neon.output_max);
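  // Processing 32 elements per iteration exposes four independent
  // multiply-accumulate chains, giving the scheduler more latitude to hide
  // NEON multiply latency.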
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb89ABCDEF = vld1_u8(input_b); input_b += 8;
const uint8x8_t vaGHIJKLMN = vld1_u8(input_a); input_a += 8;
const uint8x8_t vbGHIJKLMN = vld1_u8(input_b); input_b += 8;
const uint8x8_t vaOPQRSTUV = vld1_u8(input_a); input_a += 8;
const uint8x8_t vbOPQRSTUV = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxb89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vb89ABCDEF, vb_zero_point));
const int16x8_t vxaGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vaGHIJKLMN, va_zero_point));
const int16x8_t vxbGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vbGHIJKLMN, vb_zero_point));
const int16x8_t vxaOPQRSTUV = vreinterpretq_s16_u16(vsubl_u8(vaOPQRSTUV, va_zero_point));
const int16x8_t vxbOPQRSTUV = vreinterpretq_s16_u16(vsubl_u8(vbOPQRSTUV, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmulq_s32(vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmulq_s32(vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmulq_s32(vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmulq_s32(vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmulq_s32(vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmulq_s32(vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc89AB = vmlaq_s32(vacc89AB, vmovl_s16(vget_low_s16(vxb89ABCDEF)), vb_multiplier);
vaccCDEF = vmlaq_s32(vaccCDEF, vmovl_s16(vget_high_s16(vxb89ABCDEF)), vb_multiplier);
vaccGHIJ = vmlaq_s32(vaccGHIJ, vmovl_s16(vget_low_s16(vxbGHIJKLMN)), vb_multiplier);
vaccKLMN = vmlaq_s32(vaccKLMN, vmovl_s16(vget_high_s16(vxbGHIJKLMN)), vb_multiplier);
vaccOPQR = vmlaq_s32(vaccOPQR, vmovl_s16(vget_low_s16(vxbOPQRSTUV)), vb_multiplier);
vaccSTUV = vmlaq_s32(vaccSTUV, vmovl_s16(vget_high_s16(vxbOPQRSTUV)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,568
| 51.2
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-neon-ld64-x8.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->neon.a_zero_point);
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->neon.b_zero_point);
const int32x4_t va_multiplier = vld1q_dup_s32(¶ms->neon.a_multiplier);
const int32x4_t vb_multiplier = vld1q_dup_s32(¶ms->neon.b_multiplier);
const int32x4_t vright_shift = vld1q_dup_s32(¶ms->neon.right_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
const uint8x8_t voutput_min = vld1_dup_u8(¶ms->neon.output_min);
const uint8x8_t voutput_max = vld1_dup_u8(¶ms->neon.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t vb01234567 = vld1_u8(input_b); input_b += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const uint8x8_t vb01234567 = vld1_u8(input_b);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxb01234567 = vreinterpretq_s16_u16(vsubl_u8(vb01234567, vb_zero_point));
int32x4_t vacc0123 = vmulq_s32(vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmulq_s32(vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vmlaq_s32(vacc0123, vmovl_s16(vget_low_s16(vxb01234567)), vb_multiplier);
vacc4567 = vmlaq_s32(vacc4567, vmovl_s16(vget_high_s16(vxb01234567)), vb_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
| 4,167
| 39.862745
| 126
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-scalar-x1.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__scalar_x1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
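  // Worked example with hypothetical parameters vbias = 32, va_multiplier = 3,
  // vb_multiplier = 5, vshift = 4 and inputs va = 10, vb = 6:
  //   vacc = 32 + 10*3 + 6*5 = 92, vout = 92 >> 4 = 5, which is then clamped
  //   and offset by the output zero point before the store.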
do {
const int32_t va = *input_a++;
const int32_t vb = *input_b++;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 1,609
| 31.2
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-scalar-x2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__scalar_x2(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
input_a += 2;
const int32_t vb0 = input_b[0];
int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vb1 = input_b[1];
int32_t vacc1 = vbias + va1 * va_multiplier;
input_b += 2;
vacc0 += vb0 * vb_multiplier;
vacc1 += vb1 * vb_multiplier;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = *input_a;
const int32_t vb = *input_b;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
}
}
| 2,502
| 31.089744
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-scalar-x4.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__scalar_x4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t va_multiplier = params->scalar.a_multiplier;
const int32_t vb_multiplier = params->scalar.b_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
const int32_t va2 = input_a[2];
const int32_t va3 = input_a[3];
input_a += 4;
const int32_t vb0 = input_b[0];
int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vb1 = input_b[1];
int32_t vacc1 = vbias + va1 * va_multiplier;
const int32_t vb2 = input_b[2];
int32_t vacc2 = vbias + va2 * va_multiplier;
const int32_t vb3 = input_b[3];
int32_t vacc3 = vbias + va3 * va_multiplier;
input_b += 4;
vacc0 += vb0 * vb_multiplier;
vacc1 += vb1 * vb_multiplier;
vacc2 += vb2 * vb_multiplier;
vacc3 += vb3 * vb_multiplier;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
int32_t vout2 = math_asr_s32(vacc2, vshift);
int32_t vout3 = math_asr_s32(vacc3, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
vout2 += voutput_zero_point;
vout3 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = *input_a++;
const int32_t vb = *input_b++;
const int32_t vacc = vbias + va * va_multiplier + vb * vb_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 3,376
| 32.77
| 88
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse2-mul16-ld64-x16.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
__m128i vb89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_b + 8));
input_a += 16;
input_b += 16;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
vb89ABCDEF = _mm_unpacklo_epi8(vb89ABCDEF, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
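The mul16 kernels split each 32-bit multiplier into 16-bit halves (a_multiplier_lo / a_multiplier_hi) so the 16x32-bit product can be assembled from SSE2's 16-bit multiplies: the low half comes from _mm_mullo_epi16, and the high half from _mm_mulhi_epu16 plus the cross term. A scalar check of that identity, with arbitrary test values:
// Scalar check of the mul16 widening trick used above: the low 32 bits of a
// 16x32-bit product are assembled from three 16-bit multiplies.
#include <assert.h>
#include <stdint.h>
int main(void) {
  const uint16_t a = 0xBEEF;                   // one lane of va01234567
  const uint32_t m = 0x0003A2F1;               // full 32-bit multiplier
  const uint16_t m_lo = (uint16_t) m;          // a_multiplier_lo lane
  const uint16_t m_hi = (uint16_t) (m >> 16);  // a_multiplier_hi lane
  const uint32_t full = (uint32_t) a * m;      // what the kernel needs, mod 2^32
  const uint16_t prod_lo = (uint16_t) ((uint32_t) a * m_lo);        // _mm_mullo_epi16
  uint16_t prod_hi = (uint16_t) (((uint32_t) a * m_lo) >> 16);      // _mm_mulhi_epu16
  prod_hi = (uint16_t) (prod_hi + (uint32_t) a * m_hi);             // + _mm_mullo_epi16
  const uint32_t assembled =                                        // _mm_unpacklo_epi16
      (uint32_t) prod_lo | ((uint32_t) prod_hi << 16);
  assert(assembled == full);
  return 0;
}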
| 7,471 | 46.897436 | 104 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse2-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
input_a += 8;
input_b += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i vb01234567 = _mm_loadl_epi64((const __m128i*) input_b);
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
vb01234567 = _mm_unpacklo_epi8(vb01234567, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 5,776 | 43.438462 | 104 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse41-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
const __m128i vb89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_b + 8)));
input_a += 16;
input_b += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
__m128i vbprod89ABCDEFhi = _mm_mulhi_epu16(vb89ABCDEF, vb_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
const __m128i vbprod89ABCDEFlo = _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
vbprod89ABCDEFhi = _mm_add_epi16(vbprod89ABCDEFhi, _mm_mullo_epi16(vb89ABCDEF, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_unpacklo_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_unpackhi_epi16(vbprod89ABCDEFlo, vbprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
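Relative to the SSE2 variants above, the only change in the SSE4.1 mul16 kernels is the load: eight uint8 values are zero-extended with _mm_cvtepu8_epi16 instead of _mm_unpacklo_epi8 against a zero vector. Both produce identical uint16 lanes; a minimal equivalence check (requires a build with SSE4.1 enabled, e.g. -msse4.1):
// Equivalence of the two zero-extension idioms for eight uint8 inputs.
#include <assert.h>
#include <smmintrin.h>
#include <stdint.h>
int main(void) {
  const uint8_t data[8] = {0, 1, 2, 3, 252, 253, 254, 255};
  const __m128i v = _mm_loadl_epi64((const __m128i*) data);
  const __m128i a = _mm_cvtepu8_epi16(v);                       // SSE4.1 path
  const __m128i b = _mm_unpacklo_epi8(v, _mm_setzero_si128());  // SSE2 path
  assert(_mm_movemask_epi8(_mm_cmpeq_epi8(a, b)) == 0xFFFF);    // all 16 bytes equal
  return 0;
}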
| 7,197 | 47.635135 | 104 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse41-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vb_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_lo);
const __m128i vb_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.b_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
input_a += 8;
input_b += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i vb01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_b));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
__m128i vbprod01234567hi = _mm_mulhi_epu16(vb01234567, vb_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
const __m128i vbprod01234567lo = _mm_mullo_epi16(vb01234567, vb_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vbprod01234567hi = _mm_add_epi16(vbprod01234567hi, _mm_mullo_epi16(vb01234567, vb_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_add_epi32(vacc0123, _mm_unpacklo_epi16(vbprod01234567lo, vbprod01234567hi));
vacc4567 = _mm_add_epi32(vacc4567, _mm_unpackhi_epi16(vbprod01234567lo, vbprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 5,562 | 43.862903 | 104 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse41-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc89AB = _mm_add_epi32(vacc89AB, _mm_mullo_epi32(vb89AB, vb_multiplier));
vaccCDEF = _mm_add_epi32(vaccCDEF, _mm_mullo_epi32(vbCDEF, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
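The mul32-ld32 kernels widen four uint8 inputs at a time: a single 32-bit load (unaligned_load_s32) feeds _mm_cvtepu8_epi32, which spreads the four bytes across four 32-bit lanes so _mm_mullo_epi32 can apply the full 32-bit multipliers directly. A scalar model of that load idiom, assuming a little-endian target:
// What the ld32 load idiom does, in scalar form: four uint8 values are read as
// one 32-bit word and zero-extended into four 32-bit lanes (little-endian).
#include <assert.h>
#include <stdint.h>
#include <string.h>
int main(void) {
  const uint8_t input[4] = {0x11, 0x22, 0x33, 0x44};
  uint32_t word;
  memcpy(&word, input, sizeof(word));  // unaligned_load_s32 equivalent
  uint32_t lanes[4];                   // result of _mm_cvtepu8_epi32
  for (int i = 0; i < 4; i++) {
    lanes[i] = (word >> (8 * i)) & 0xFF;
  }
  assert(lanes[0] == 0x11 && lanes[1] == 0x22 && lanes[2] == 0x33 && lanes[3] == 0x44);
  return 0;
}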
| 5,921 | 46 | 107 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-sse41-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__sse41_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_add_epi32(vacc0123, _mm_mullo_epi32(vb0123, vb_multiplier));
vacc4567 = _mm_add_epi32(vacc4567, _mm_mullo_epi32(vb4567, vb_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,654 | 42.915094 | 107 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__wasmsimd_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_u16x8_load8x8(input_b + 8);
input_a += 16;
input_b += 16;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb89ABCDEF), vb_multiplier));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb89ABCDEF), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
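All of these kernels share the same output tail: accumulators are narrowed to int16 with signed saturation, the output zero point is added with saturation, the result is narrowed to uint8 with unsigned saturation, and finally clamped to [output_min, output_max]. A scalar model of one lane, with illustrative (not real) parameter values:
// Scalar model of the narrowing/clamping tail shared by these kernels.
#include <stdint.h>
#include <stdio.h>
static int16_t sat_i16(int32_t x) {
  return (int16_t) (x > INT16_MAX ? INT16_MAX : x < INT16_MIN ? INT16_MIN : x);
}
static uint8_t sat_u8(int32_t x) {
  return (uint8_t) (x > 255 ? 255 : x < 0 ? 0 : x);
}
int main(void) {
  const int16_t zero_point = 128;           // voutput_zero_point lane (illustrative)
  const uint8_t out_min = 5, out_max = 250; // voutput_min / voutput_max (illustrative)
  const int32_t acc = 700;                  // a shifted accumulator lane
  int32_t v = sat_i16(acc);                 // wasm_i16x8_narrow_i32x4
  v = sat_i16(v + zero_point);              // wasm_i16x8_add_sat
  uint8_t out = sat_u8(v);                  // wasm_u8x16_narrow_i16x8 -> 255
  out = out < out_min ? out_min : out > out_max ? out_max : out;  // clamp -> 250
  printf("%u\n", out);
  return 0;
}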
| 5,263 | 43.610169 | 119 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-wasmsimd-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__wasmsimd_x32(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
const v128_t vb89ABCDEF = wasm_u16x8_load8x8(input_b + 8);
const v128_t vaGHIJKLMN = wasm_u16x8_load8x8(input_a + 16);
const v128_t vbGHIJKLMN = wasm_u16x8_load8x8(input_b + 16);
const v128_t vaOPQRSTUV = wasm_u16x8_load8x8(input_a + 24);
const v128_t vbOPQRSTUV = wasm_u16x8_load8x8(input_b + 24);
input_a += 32;
input_b += 32;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccOPQR = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vaOPQRSTUV), va_multiplier));
v128_t vaccSTUV = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vaOPQRSTUV), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc89AB = wasm_i32x4_add(vacc89AB, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb89ABCDEF), vb_multiplier));
vaccCDEF = wasm_i32x4_add(vaccCDEF, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb89ABCDEF), vb_multiplier));
vaccGHIJ = wasm_i32x4_add(vaccGHIJ, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vbGHIJKLMN), vb_multiplier));
vaccKLMN = wasm_i32x4_add(vaccKLMN, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vbGHIJKLMN), vb_multiplier));
vaccOPQR = wasm_i32x4_add(vaccOPQR, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vbOPQRSTUV), vb_multiplier));
vaccSTUV = wasm_i32x4_add(vaccSTUV, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vbOPQRSTUV), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
vaccOPQR = wasm_i32x4_shr(vaccOPQR, vshift);
vaccSTUV = wasm_i32x4_shr(vaccSTUV, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t voutOPQRSTUV = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = wasm_u8x16_max(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_u8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 7,160 | 50.15 | 119 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-wasmsimd-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__wasmsimd_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t vbias = wasm_v128_load64_splat(params->wasmsimd.bias);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const v128_t vb_multiplier = wasm_v128_load64_splat(params->wasmsimd.b_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
input_a += 8;
input_b += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t vb01234567 = wasm_u16x8_load8x8(input_b);
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_add(vacc0123, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vb01234567), vb_multiplier));
vacc4567 = wasm_i32x4_add(vacc4567, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vb01234567), vb_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
| 4,161 | 40.62 | 119 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-xop-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vb89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
const __m128i vbCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc89AB = _mm_macc_epi32(vb89AB, vb_multiplier, vacc89AB);
vaccCDEF = _mm_macc_epi32(vbCDEF, vb_multiplier, vaccCDEF);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
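The XOP variants differ from the SSE4.1 mul32 kernels only in fusing the multiply and add: _mm_macc_epi32(a, b, c) yields the low 32 bits of a*b + c per lane, replacing the _mm_mullo_epi32 / _mm_add_epi32 pair. A scalar statement of that per-lane contract, with arbitrary test values:
// Per-lane contract of _mm_macc_epi32 as used above: low 32 bits of a*b + c.
#include <assert.h>
#include <stdint.h>
static uint32_t macc_lane(uint32_t va, uint32_t vmul, uint32_t vbias) {
  return va * vmul + vbias;  // wraps mod 2^32, matching the SIMD lane
}
int main(void) {
  assert(macc_lane(200u, 17259u, 0x20000u) == 200u * 17259u + 0x20000u);
  assert(macc_lane(0xFFFFFFFFu, 2u, 5u) == 3u);  // wrap-around case
  return 0;
}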
| 5,780 | 43.469231 | 107 | c |
XNNPACK | XNNPACK-master/src/qu8-vadd/gen/qu8-vadd-minmax-xop-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vadd/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vadd_minmax_ukernel__xop_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse4.bias);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vb_multiplier = _mm_load_si128((const __m128i*) params->sse4.b_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i vb0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i vb4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_b + 4)));
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_macc_epi32(vb0123, vb_multiplier, vacc0123);
vacc4567 = _mm_macc_epi32(vb4567, vb_multiplier, vacc4567);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,577
| 40.618182
| 107
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-fp32-neon.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_fp32__neon(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const float32x4_t vscale = vdupq_n_f32(scale);
#ifdef __aarch64__
const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
const uint8x16_t vqmin = vdupq_n_u8(qmin);
const uint8x16_t vqmax = vdupq_n_u8(qmax);
#else
const float32x4_t vfmin = vdupq_n_f32((float) ((int32_t)(uint32_t) qmin - (int32_t)(uint32_t) zero_point));
const float32x4_t vfmax = vdupq_n_f32((float) ((int32_t)(uint32_t) qmax - (int32_t)(uint32_t) zero_point));
const float32x4_t vfmagic = vdupq_n_f32(12582912.0f);
const int32x4_t vimagic = vdupq_n_s32(INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point);
#endif
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
// Convert int32_t input to FP32 and multiply by FP32 scale.
// Both operations involve statistically unbiased roundings:
// - Large int32_t values can't be exactly represented as FP32. The conversion instruction in ARM NEON would
// round it to the nearest FP32 value with ties to even.
// - Product of two FP32 values is generally not exactly representable as an FP32 value, and will be rounded
// to the nearest FP32 value with ties to even.
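// Illustrative example: the int32_t value 16777217 (2**24 + 1) has no exact FP32 representation and
// converts to 16777216.0f, while 16777219 (2**24 + 3) is a midpoint and converts to 16777220.0f
// (ties to even).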
const float32x4_t x_scaled = vmulq_f32(vcvtq_f32_s32(x), vscale);
const float32x4_t y_scaled = vmulq_f32(vcvtq_f32_s32(y), vscale);
const float32x4_t z_scaled = vmulq_f32(vcvtq_f32_s32(z), vscale);
const float32x4_t w_scaled = vmulq_f32(vcvtq_f32_s32(w), vscale);
#ifdef __aarch64__
// Leverage "Floating-point Convert to Signed integer, rounding to nearest with ties to even" instruction.
// This is an ARMv8 instruction (always available in AArch64), which saturates result on overflow.
// We don't need to specifically consider saturated results; they will be clamped at the last stage.
const int32x4_t x_rounded = vcvtnq_s32_f32(x_scaled);
const int32x4_t y_rounded = vcvtnq_s32_f32(y_scaled);
const int32x4_t z_rounded = vcvtnq_s32_f32(z_scaled);
const int32x4_t w_rounded = vcvtnq_s32_f32(w_scaled);
// Standard final sequence on ARM NEON:
// - Pack to int16_t and saturate
// - Add zero point
// - Pack to uint8_t and saturate
// - Clamp between qmin and qmax
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_rounded), y_rounded), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_rounded), w_rounded), vzero_point);
const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
vst1q_u8(output, xyzw_clamped);
output += 16;
#else
// ARMv7 NEON offers only a floating-point to integer conversion instruction with rounding towards zero.
// In lieu of conversion instruction with rounding-to-nearest-even, we use a magic trick of adding a large
// number (1.5 * 2**23) to scaled value to cause rounding to integer, and then subtracting this magic number as
// integer. This trick works only in a limited range (absolute value of input must be less than 2**22), so
// generally we have to clamp input to this range before using the magic. However, clamping to any smaller range
// works just as well, and thus we clamp to [qmin - zero point, qmax - zero point] range so that after we add
// zero point to the result, it gets into target [qmin, qmax] range.
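// Worked example (illustrative): for a scaled value of 2.5f and zero_point = 128,
// 2.5f + 12582912.0f rounds to 12582914.0f (ties to even), whose bit pattern is 0x4B400002;
// subtracting vimagic = 0x4B400000 - 128 yields 2 + 128 = 130, i.e. round-to-nearest-even(2.5) + zero_point.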
const float32x4_t x_clamped = vminq_f32(vmaxq_f32(x_scaled, vfmin), vfmax);
const float32x4_t y_clamped = vminq_f32(vmaxq_f32(y_scaled, vfmin), vfmax);
const float32x4_t z_clamped = vminq_f32(vmaxq_f32(z_scaled, vfmin), vfmax);
const float32x4_t w_clamped = vminq_f32(vmaxq_f32(w_scaled, vfmin), vfmax);
// Conversion to integer using the "magic trick". Rounding is performed in the output of addition operation,
// and the result is rounded to the nearest integer with ties to even.
const int32x4_t x_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(x_clamped, vfmagic)), vimagic);
const int32x4_t y_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(y_clamped, vfmagic)), vimagic);
const int32x4_t z_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(z_clamped, vfmagic)), vimagic);
const int32x4_t w_biased = vsubq_s32(vreinterpretq_s32_f32(vaddq_f32(w_clamped, vfmagic)), vimagic);
// Select low 8 bits of each 32-bit integer in the vectors for the output.
// Since result is already clamped to [qmin, qmax] subrange of [0, 255], saturation is not needed.
const int16x8_t xy_packed = vcombine_s16(vmovn_s32(x_biased), vmovn_s32(y_biased));
const int16x8_t zw_packed = vcombine_s16(vmovn_s32(z_biased), vmovn_s32(w_biased));
const uint8x16_t xyzw_packed = vreinterpretq_u8_s8(vcombine_s8(vmovn_s16(xy_packed), vmovn_s16(zw_packed)));
// AArch32 version:
// 4x VCVT.F32.S32 Qd, Qm
// 4x VMUL.F32 Qd, Qm, Qn
// 4x VMIN.F32 Qd, Qm, Qn
// 4x VMAX.F32 Qd, Qm, Qn
// 4x VADD.F32 Qd, Qm, Qn
// 4x VSUB.S32 Qd, Qm, Qn
// 4x VMOVN.I32 Dd, Qm
// 2x VMOVN.I16 Dd, Qm
// ---------------------
// 30 instructions total
//
// AArch64 version:
// 4x SCVTF Vd.4S, Vn.4S
// 4x FMUL Vd.4S, Vn.4S, Vm.4S
// 4x FCVTNS Vd.4S, Vn.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTUN Vd.8B, Vn.8H
// 1x SQXTUN2 Vd.16B, Vn.8H
// 1x UMIN Vd.16B, Vn.16B, Vm.16B
// 1x UMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 22 instructions total
vst1q_u8(output, xyzw_packed);
output += 16;
#endif
}
}
| 6,446
| 45.381295
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-fp32-scalar-fmagic.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_fp32__scalar_fmagic(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const float fmin = (float) ((int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point);
const float fmax = (float) ((int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point);
const float fmagic = 12582912.0f;
const int32_t imagic = INT32_C(0x4B400000) - (int32_t)(uint32_t) zero_point;
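// Worked example (illustrative): for a clamped value of -3.5f (assuming it lies within [fmin, fmax]),
// -3.5f + 12582912.0f rounds to 12582908.0f (ties to even), whose bit pattern is 0x4B3FFFFC =
// 0x4B400000 - 4; subtracting imagic then yields -4 + zero_point, i.e. round-to-nearest-even(-3.5)
// biased by the zero point.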
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
const float x_scaled = (float) x * scale;
const float y_scaled = (float) y * scale;
const float z_scaled = (float) z * scale;
const float w_scaled = (float) w * scale;
const float x_clamped = math_min_f32(math_max_f32(x_scaled, fmin), fmax);
const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax);
const float z_clamped = math_min_f32(math_max_f32(z_scaled, fmin), fmax);
const float w_clamped = math_min_f32(math_max_f32(w_scaled, fmin), fmax);
const int32_t x_biased = (int32_t) float_as_uint32(x_clamped + fmagic) - imagic;
const int32_t y_biased = (int32_t) float_as_uint32(y_clamped + fmagic) - imagic;
const int32_t z_biased = (int32_t) float_as_uint32(z_clamped + fmagic) - imagic;
const int32_t w_biased = (int32_t) float_as_uint32(w_clamped + fmagic) - imagic;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
| 2,133
| 32.34375
| 91
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-fp32-scalar-lrintf.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_fp32__scalar_lrintf(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const float fmin = (float) ((int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point);
const float fmax = (float) ((int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point);
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
const float x_scaled = (float) x * scale;
const float y_scaled = (float) y * scale;
const float z_scaled = (float) z * scale;
const float w_scaled = (float) w * scale;
const float x_clamped = math_min_f32(math_max_f32(x_scaled, fmin), fmax);
const float y_clamped = math_min_f32(math_max_f32(y_scaled, fmin), fmax);
const float z_clamped = math_min_f32(math_max_f32(z_scaled, fmin), fmax);
const float w_clamped = math_min_f32(math_max_f32(w_scaled, fmin), fmax);
const int32_t x_rounded = (int32_t) lrintf(x_clamped);
const int32_t y_rounded = (int32_t) lrintf(y_clamped);
const int32_t z_rounded = (int32_t) lrintf(z_clamped);
const int32_t w_rounded = (int32_t) lrintf(w_clamped);
const int32_t x_biased = x_rounded + (int32_t) (uint32_t) zero_point;
const int32_t y_biased = y_rounded + (int32_t) (uint32_t) zero_point;
const int32_t z_biased = z_rounded + (int32_t) (uint32_t) zero_point;
const int32_t w_biased = w_rounded + (int32_t) (uint32_t) zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
| 2,212
| 31.544118
| 91
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-fp32-sse2.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_fp32__sse2(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const __m128 vscale = _mm_set1_ps(scale);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
// Convert int32_t input to FP32 and multiply by FP32 scale.
// Both operations involve statistically unbiased roundings (with default MXCSR rounding mode):
// - Large int32_t values can't be exactly represented as FP32. The CVTDQ2PS instruction on x86 rounds them
// to the nearest FP32 value with ties to even (assuming default MXCSR rounding mode).
// - Product of two FP32 values is generally not exactly representable as an FP32 value, and will be rounded
// to the nearest FP32 value with ties to even under the default MXCSR rounding mode.
const __m128 x_scaled = _mm_mul_ps(_mm_cvtepi32_ps(x), vscale);
const __m128 y_scaled = _mm_mul_ps(_mm_cvtepi32_ps(y), vscale);
const __m128 z_scaled = _mm_mul_ps(_mm_cvtepi32_ps(z), vscale);
const __m128 w_scaled = _mm_mul_ps(_mm_cvtepi32_ps(w), vscale);
// Convert scaled FP32 result to int32_t using the CVTPS2DQ instruction from x86 SSE2. CVTPS2DQ rounds the
// result to the nearest integer with ties to even (assuming default MXCSR rounding mode).
// However, when conversion overflows, it produces INT32_MIN as a result. For large positive inputs the result
// of conversion can become negative, which affects the final requantization result. Note that on x86 SSE2 we
// have e.g. int32_t(float(INT32_MAX)) == INT32_MIN! This happens because float(INT32_MAX) rounds to 2**31,
// which overflows int32_t when it is converted back to integer.
//
// Thankfully, we can prove that overflow never happens in this requantization scheme. The largest positive
// input is INT32_MAX (2**31 - 1), which turns into 2**31 when converted to float. The largest scale value
// is 0x1.FFFFFEp-1. When multiplied together, the result is 2147483520 (compare to INT32_MAX = 2147483647),
// which fits into int32_t without overflow.
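// Arithmetic check (illustrative): 0x1.FFFFFEp-1 = 1 - 2**-24, so the largest possible product is
// 2**31 * (1 - 2**-24) = 2**31 - 2**7 = 2147483520, which indeed fits into int32_t.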
const __m128i x_rounded = _mm_cvtps_epi32(x_scaled);
const __m128i y_rounded = _mm_cvtps_epi32(y_scaled);
const __m128i z_rounded = _mm_cvtps_epi32(z_scaled);
const __m128i w_rounded = _mm_cvtps_epi32(w_scaled);
// Standard final sequence on x86 SSE2:
// - Pack to int16_t and saturate
// - Add zero point
// - Pack to uint8_t and saturate
// - Clamp between qmin and qmax
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_rounded, y_rounded), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_rounded, w_rounded), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 4x CVTDQ2PS
// 4x MULPS
// 4x CVTPS2DQ
// 2x PACKSSDW
// 1x PACKUSWB
// 2x PADDW
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 19 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 4,083
| 42.446809
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-fp32-wasmsimd.c
|
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_fp32__wasmsimd(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const v128_t vscale = wasm_f32x4_splat(scale);
const v128_t vfmin = wasm_f32x4_splat((float) ((int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point));
const v128_t vfmax = wasm_f32x4_splat((float) ((int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point));
const v128_t vfmagic = wasm_f32x4_const_splat(12582912.0f);
const v128_t vimagic = wasm_i32x4_splat(INT32_C(0x4B400000) - (int32_t) (uint32_t) zero_point);
for (; n != 0; n -= 16) {
const v128_t x = wasm_v128_load(input);
const v128_t y = wasm_v128_load(input + 4);
const v128_t z = wasm_v128_load(input + 8);
const v128_t w = wasm_v128_load(input + 12);
input += 16;
// Convert int32_t input to FP32 and multiply by FP32 scale.
// Both operations involve statistically unbiased roundings:
// - Large int32_t values can't be exactly represented as FP32. The conversion instruction in WAsm SIMD would
// round it to the nearest FP32 value with ties to even.
// - Product of two FP32 values is generally not exactly representable as an FP32 value, and will be rounded
// to the nearest FP32 value with ties to even.
const v128_t x_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(x), vscale);
const v128_t y_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(y), vscale);
const v128_t z_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(z), vscale);
const v128_t w_scaled = wasm_f32x4_mul(wasm_f32x4_convert_i32x4(w), vscale);
// WAsm SIMD offers only a floating-point to integer conversion instruction with rounding towards zero.
// In lieu of conversion instruction with rounding-to-nearest-even, we use a magic trick of adding a large
// number (1.5 * 2**23) to scaled value to cause rounding to integer, and then subtracting this magic number as
// integer. This trick works only in a limited range (absolute value of input must be less than 2**22), so
// generally we have to clamp input to this range before using the magic. However, clamping to any smaller range
// works just as well, and thus we clamp to [qmin - zero point, qmax - zero point] range so that after we add
// zero point to the result, it gets into target [qmin, qmax] range.
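// Worked example (illustrative): for a clamped value of 100.75f, 100.75f + 12582912.0f rounds to
// 12583013.0f, whose bit pattern is 0x4B400065; subtracting vimagic yields 101 + zero_point,
// i.e. round-to-nearest-even(100.75) biased by the zero point.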
const v128_t x_clamped = wasm_f32x4_min(wasm_f32x4_max(x_scaled, vfmin), vfmax);
const v128_t y_clamped = wasm_f32x4_min(wasm_f32x4_max(y_scaled, vfmin), vfmax);
const v128_t z_clamped = wasm_f32x4_min(wasm_f32x4_max(z_scaled, vfmin), vfmax);
const v128_t w_clamped = wasm_f32x4_min(wasm_f32x4_max(w_scaled, vfmin), vfmax);
// Conversion to integer using the "magic trick". Rounding is performed in the output of addition operation,
// and the result is rounded to the nearest integer with ties to even.
const v128_t x_biased = wasm_i32x4_sub(wasm_f32x4_add(x_clamped, vfmagic), vimagic);
const v128_t y_biased = wasm_i32x4_sub(wasm_f32x4_add(y_clamped, vfmagic), vimagic);
const v128_t z_biased = wasm_i32x4_sub(wasm_f32x4_add(z_clamped, vfmagic), vimagic);
const v128_t w_biased = wasm_i32x4_sub(wasm_f32x4_add(w_clamped, vfmagic), vimagic);
// Select low 8 bits of each 32-bit integer in the vectors for the output.
// Since result is already clamped to [qmin, qmax] subrange of [0, 255], saturation is not needed.
const v128_t xy_packed = wasm_v16x8_shuffle(x_biased, y_biased, 0, 2, 4, 6, 8, 10, 12, 14);
const v128_t zw_packed = wasm_v16x8_shuffle(z_biased, w_biased, 0, 2, 4, 6, 8, 10, 12, 14);
const v128_t xyzw_packed = wasm_v8x16_shuffle(xy_packed, zw_packed, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30);
// 4x f32x4.convert_i32x4_s
// 4x f32x4.mul
// 4x f32x4.max
// 4x f32x4.min
// 4x f32x4.add
// 4x i32x4.sub
// 3x v8x16.shuffle
// ---------------------
// 29 instructions total
wasm_v128_store(output, xyzw_packed);
output += 16;
}
}
| 4,437
| 48.311111
| 131
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-neon.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__neon(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
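// Worked example (illustrative): for scale = 0x1.0p-8f (1/256), scale_bits = 0x3B800000, so
// multiplier = (0 | 0x00800000) << 7 = 0x40000000 and shift = 127 + 31 - 32 - 119 = 7.
// Check: multiplier * 2**-31 * 2**-shift = 2**30 * 2**-31 * 2**-7 = 2**-8 = scale.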
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
const int32x4_t vshift = vdupq_n_s32(-shift);
const int32x4_t vshift_eq_0_mask = vreinterpretq_s32_u32(vceqq_s32(vshift, vmovq_n_s32(0)));
const uint8x16_t vqmin = vdupq_n_u8(qmin);
const uint8x16_t vqmax = vdupq_n_u8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
// Directly use VQRDMULH/SQRDMULH instruction for Q31 multiplication with rounding.
// Although these instructions saturate out-of-range outputs, we never hit this case in requantization.
const int32x4_t x_product = vqrdmulhq_s32(x, vmultiplier);
const int32x4_t y_product = vqrdmulhq_s32(y, vmultiplier);
const int32x4_t z_product = vqrdmulhq_s32(z, vmultiplier);
const int32x4_t w_product = vqrdmulhq_s32(w, vmultiplier);
// Shift the 32-bit product right with rounding.
// Rounding is performed towards the closest integer, with midpoints rounded away from zero.
//
// We leverage the "right shift with rounding" instruction (VRSHL.S32 on ARM NEON, SRSHL in ARM64 Advanced SIMD) to
// do the shift. However, as this instruction rounds midpoints up, rather than away from zero, we adjust the input
// by subtracting 1 from negative values, but only if shift is non-zero.
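// Worked example (illustrative): for a negative product -5 with shift = 1, VRSHL alone computes
// (-5 + 1) >> 1 = -2 (midpoint rounded up); pre-adjusting the product to -6 gives (-6 + 1) >> 1 = -3,
// i.e. round(-2.5) away from zero, as required.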
const int32x4_t x_adjusted_product = vsraq_n_s32(x_product, vbicq_s32(x, vshift_eq_0_mask), 31);
const int32x4_t y_adjusted_product = vsraq_n_s32(y_product, vbicq_s32(y, vshift_eq_0_mask), 31);
const int32x4_t z_adjusted_product = vsraq_n_s32(z_product, vbicq_s32(z, vshift_eq_0_mask), 31);
const int32x4_t w_adjusted_product = vsraq_n_s32(w_product, vbicq_s32(w, vshift_eq_0_mask), 31);
const int32x4_t x_scaled = vrshlq_s32(x_adjusted_product, vshift);
const int32x4_t y_scaled = vrshlq_s32(y_adjusted_product, vshift);
const int32x4_t z_scaled = vrshlq_s32(z_adjusted_product, vshift);
const int32x4_t w_scaled = vrshlq_s32(w_adjusted_product, vshift);
#ifdef __aarch64__
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const uint8x16_t xyzw_packed = vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif
const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
// AArch32 version:
// 4x VQRDMULH.S32 Qd, Qm, Qn
// 4x VBIC Qd, Qm, Qn
// 4x VSRA.S32 Qd, Qm, #31
// 4x VRSHL.S32 Qd, Qm, Qn
// 4x VQMOVN.S32 Dd, Qm
// 2x VQADD.S16 Qd, Qm, Qn
// 2x VQMOVUN.S16 Dd, Qm
// 1x VMAX.U8 Qd, Qm, Qn
// 1x VMIN.U8 Qd, Qm, Qn
// ---------------------
// 26 instructions total
//
// AArch64 version:
// 4x SQRDMULH Vd.4S, Vn.4S, Vm.4S
// 4x BIC Vd.16B, Vn.16B, Vm.16B
// 4x SSRA Vd.4S, Vn.4S, #31
// 4x SRSHL Vd.4S, Vn.4S, Vm.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTUN Vd.8B, Vn.8H
// 1x SQXTUN2 Vd.16B, Vn.8H
// 1x UMIN Vd.16B, Vn.16B, Vm.16B
// 1x UMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 26 instructions total
vst1q_u8(output, xyzw_clamped);
output += 16;
}
}
| 5,015
| 39.128
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-scalar.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__scalar(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const int64_t q31rounding = INT64_C(0x40000000);
const int32_t remainder_mask = (int32_t) ((UINT32_C(1) << shift) - UINT32_C(1));
const int32_t threshold = (int32_t) ((uint32_t) remainder_mask >> 1);
const int32_t smin = (int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point;
const int32_t smax = (int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute full 64-bit product of signed 32-bit factors.
//
// Note: multiplier can be treated as either signed or unsigned.
const int64_t x_product = (int64_t) x * (int64_t) multiplier;
const int64_t y_product = (int64_t) y * (int64_t) multiplier;
const int64_t z_product = (int64_t) z * (int64_t) multiplier;
const int64_t w_product = (int64_t) w * (int64_t) multiplier;
// Get the Q31 multiplication result by extracting bits 31-62 of the product, with rounding up.
// Add rounding value (0x40000000) and then shift right by 31 bits and extract the low 32-bit word.
// Note: casts to unsigned types are needed to avoid undefined behavior.
// Given the multiplier range, the result of Q31 multiplication is in [-2147483520, 2147483519] range.
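// Tiny example (illustrative): for x = 3 and multiplier = 0x40000000 (0.5 in Q31),
// x_product = 3 * 2**30; adding q31rounding gives 2**32, and >> 31 yields 2 = round(1.5).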
const int32_t x_q31product = (int32_t) (uint32_t) ((uint64_t) (x_product + q31rounding) >> 31);
const int32_t y_q31product = (int32_t) (uint32_t) ((uint64_t) (y_product + q31rounding) >> 31);
const int32_t z_q31product = (int32_t) (uint32_t) ((uint64_t) (z_product + q31rounding) >> 31);
const int32_t w_q31product = (int32_t) (uint32_t) ((uint64_t) (w_product + q31rounding) >> 31);
// Arithmetically shift the adjusted product right with rounding.
// Rounding is performed towards the closest integer, with midpoints rounded away from zero.
//
// Shift with correct rounding could be efficiently implemented by pre-adding rounding constant, but with input in
// [-2147483520, 2147483519] range and rounding constant up to 2**30 we can't rule out overflow. This limitation
// leaves us with 3 options:
// 1. Extend input to 64-bit signed integer, perform addition and shift on 64-bit integers, then truncate result
// to 32 bits.
// 2. Detect overflow and handle this situation separately. Note that overflow is possible only when input is
// positive, and even when addition of a rounding constant overflows 32-bit signed integer, it still doesn't
// overflow 32-bit unsigned integer. Thus, in case of signed overflow, we can compute the result using unsigned
// arithmetics, specifically using logical shift right instead of arithmetic shift right.
// 3. Perform arithmetic shift as is, which produces the division result rounded down. Then compute the remainder of
// this division by a power of 2, and adjust the result. Result needs adjustment (increment by 1) when
// - input is positive, shift is non-zero, and remainder >= 2**(shift - 1), e.g. 10 >> 2 needs adjustment
// - input is negative, shift is non-zero, and remainder > 2**(shift - 1), e.g. -10 >> 2 doesn't need adjustment
// These conditions can be generalized as
// remainder + (input <= 0) > 2**(shift - 1)
// or equivalently
// remainder - (input < 0) > ((2**shift - 1) >> 1)
// When shift is 0, remainder is 0 as well, the last condition is always false, and no adjustment is done.
//
// Among these options, option 3 is the most performant across the board, although option 1 is promising for 64-bit
// instruction sets.
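// Worked example (illustrative) for option 3 with shift = 2 (remainder_mask = 3, threshold = 1):
// q31product = 10: remainder = (10 & 3) - 0 = 2 > 1, so scaled = (10 >> 2) + 1 = 3 = round(2.5);
// q31product = -10: remainder = (-10 & 3) - 1 = 1, not > 1, so scaled = asr(-10, 2) = -3 = round(-2.5).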
const int32_t x_remainder = (x_q31product & remainder_mask) - (int32_t) (x_q31product < 0);
const int32_t y_remainder = (y_q31product & remainder_mask) - (int32_t) (y_q31product < 0);
const int32_t z_remainder = (z_q31product & remainder_mask) - (int32_t) (z_q31product < 0);
const int32_t w_remainder = (w_q31product & remainder_mask) - (int32_t) (w_q31product < 0);
const int32_t x_scaled = math_asr_s32(x_q31product, shift) + (int32_t) (x_remainder > threshold);
const int32_t y_scaled = math_asr_s32(y_q31product, shift) + (int32_t) (y_remainder > threshold);
const int32_t z_scaled = math_asr_s32(z_q31product, shift) + (int32_t) (z_remainder > threshold);
const int32_t w_scaled = math_asr_s32(w_q31product, shift) + (int32_t) (w_remainder > threshold);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition cannot be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 255) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
| 6,729
| 51.170543
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-sse2.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__sse2(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
const __m128i x_abs = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
const __m128i y_abs = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
const __m128i z_abs = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
const __m128i w_abs = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
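// Note (illustrative): with mask = (x < 0 ? -1 : 0), the identity (x ^ mask) - mask computes |x|;
// the same identity, applied to the 64-bit lanes below, restores the sign of the products after the
// unsigned PMULUDQ multiplication.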
const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);
const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 16x PSHUFD
// 4x SHUFPS
// 8x PMULUDQ
// 8x PXOR (setzero)
// 12x PXOR
// 4x PAND
// 8x PADDQ
// 4x PADDD
// 2x PADDW
// 8x PSUBQ
// 8x PSUBD
// 8x PSRLQ (immediate)
// 4x PSRAD (register)
// 12x PCMPGTD
// 2x PACKSSDW
// 1x PACKUSWB
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 111 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 9,694
| 50.84492
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-sse41.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__sse41(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_rev = _mm_shuffle_epi32(x, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_rev = _mm_shuffle_epi32(y, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_rev = _mm_shuffle_epi32(z, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_rev = _mm_shuffle_epi32(w, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_product_even = _mm_add_epi64(_mm_mul_epi32(x, vmultiplier), vq31rounding);
const __m128i y_product_even = _mm_add_epi64(_mm_mul_epi32(y, vmultiplier), vq31rounding);
const __m128i z_product_even = _mm_add_epi64(_mm_mul_epi32(z, vmultiplier), vq31rounding);
const __m128i w_product_even = _mm_add_epi64(_mm_mul_epi32(w, vmultiplier), vq31rounding);
const __m128i x_product_odd = _mm_add_epi64(_mm_mul_epi32(x_rev, vmultiplier), vq31rounding);
const __m128i y_product_odd = _mm_add_epi64(_mm_mul_epi32(y_rev, vmultiplier), vq31rounding);
const __m128i z_product_odd = _mm_add_epi64(_mm_mul_epi32(z_rev, vmultiplier), vq31rounding);
const __m128i w_product_odd = _mm_add_epi64(_mm_mul_epi32(w_rev, vmultiplier), vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_product_even, 31);
const __m128i x_q31product_odd = _mm_add_epi64(x_product_odd, x_product_odd);
const __m128i y_q31product_even = _mm_srli_epi64(y_product_even, 31);
const __m128i y_q31product_odd = _mm_add_epi64(y_product_odd, y_product_odd);
const __m128i z_q31product_even = _mm_srli_epi64(z_product_even, 31);
const __m128i z_q31product_odd = _mm_add_epi64(z_product_odd, z_product_odd);
const __m128i w_q31product_even = _mm_srli_epi64(w_product_even, 31);
const __m128i w_q31product_odd = _mm_add_epi64(w_product_odd, w_product_odd);
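// Note (illustrative): each 64-bit rounded product holds the Q31 result in bits 31-62. Shifting the
// even products right by 31 leaves it in the low dword, while doubling the odd products moves it into
// the high dword, so PBLENDW with mask 0xCC assembles the lanes in order without a final shuffle.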
const __m128i x_q31product = _mm_blend_epi16(x_q31product_even, x_q31product_odd, 0xCC);
const __m128i y_q31product = _mm_blend_epi16(y_q31product_even, y_q31product_odd, 0xCC);
const __m128i z_q31product = _mm_blend_epi16(z_q31product_even, z_q31product_odd, 0xCC);
const __m128i w_q31product = _mm_blend_epi16(w_q31product_even, w_q31product_odd, 0xCC);
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 4x PSHUFD
// 8x PMULDQ
// 12x PADDQ
// 4x PADDD
// 2x PADDW
// 4x PSUBD
// 4x PSRLQ (immediate)
// 4x PSRAD (register)
// 4x PBLENDW
// 4x PAND
// 4x PXOR (setzero)
// 8x PCMPGTD
// 2x PACKSSDW
// 1x PACKUSWB
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 67 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 6,052
| 43.507353
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-ssse3.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <tmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__ssse3(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t)(((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const __m128i vremainder_mask = _mm_set1_epi32((int) remainder_mask);
const __m128i vthreshold = _mm_set1_epi32((int) (remainder_mask >> 1));
const __m128i vq31rounding = _mm_set1_epi64x(UINT64_C(0x40000000));
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs = _mm_abs_epi32(x);
const __m128i y_abs = _mm_abs_epi32(y);
const __m128i z_abs = _mm_abs_epi32(z);
const __m128i w_abs = _mm_abs_epi32(w);
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
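// Note (illustrative): PABSD computes |x| directly; the negative masks are kept only to restore the
// sign of the 64-bit products after the unsigned PMULUDQ multiplication.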
const __m128i x_abs_rev = _mm_shuffle_epi32(x_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs_rev = _mm_shuffle_epi32(y_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs_rev = _mm_shuffle_epi32(z_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs_rev = _mm_shuffle_epi32(w_abs, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_abs_product_even = _mm_mul_epu32(x_abs, vmultiplier);
const __m128i y_abs_product_even = _mm_mul_epu32(y_abs, vmultiplier);
const __m128i z_abs_product_even = _mm_mul_epu32(z_abs, vmultiplier);
const __m128i w_abs_product_even = _mm_mul_epu32(w_abs, vmultiplier);
const __m128i x_neg_mask_even = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i y_neg_mask_even = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i z_neg_mask_even = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i w_neg_mask_even = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(2, 2, 0, 0));
const __m128i x_product_even = _mm_sub_epi64(_mm_xor_si128(x_abs_product_even, x_neg_mask_even), x_neg_mask_even);
const __m128i y_product_even = _mm_sub_epi64(_mm_xor_si128(y_abs_product_even, y_neg_mask_even), y_neg_mask_even);
const __m128i z_product_even = _mm_sub_epi64(_mm_xor_si128(z_abs_product_even, z_neg_mask_even), z_neg_mask_even);
const __m128i w_product_even = _mm_sub_epi64(_mm_xor_si128(w_abs_product_even, w_neg_mask_even), w_neg_mask_even);
const __m128i x_rounded_product_even = _mm_add_epi64(x_product_even, vq31rounding);
const __m128i y_rounded_product_even = _mm_add_epi64(y_product_even, vq31rounding);
const __m128i z_rounded_product_even = _mm_add_epi64(z_product_even, vq31rounding);
const __m128i w_rounded_product_even = _mm_add_epi64(w_product_even, vq31rounding);
const __m128i x_abs_product_odd = _mm_mul_epu32(x_abs_rev, vmultiplier);
const __m128i y_abs_product_odd = _mm_mul_epu32(y_abs_rev, vmultiplier);
const __m128i z_abs_product_odd = _mm_mul_epu32(z_abs_rev, vmultiplier);
const __m128i w_abs_product_odd = _mm_mul_epu32(w_abs_rev, vmultiplier);
const __m128i x_neg_mask_odd = _mm_shuffle_epi32(x_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i y_neg_mask_odd = _mm_shuffle_epi32(y_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i z_neg_mask_odd = _mm_shuffle_epi32(z_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i w_neg_mask_odd = _mm_shuffle_epi32(w_neg_mask, _MM_SHUFFLE(3, 3, 1, 1));
const __m128i x_product_odd = _mm_sub_epi64(_mm_xor_si128(x_abs_product_odd, x_neg_mask_odd), x_neg_mask_odd);
const __m128i y_product_odd = _mm_sub_epi64(_mm_xor_si128(y_abs_product_odd, y_neg_mask_odd), y_neg_mask_odd);
const __m128i z_product_odd = _mm_sub_epi64(_mm_xor_si128(z_abs_product_odd, z_neg_mask_odd), z_neg_mask_odd);
const __m128i w_product_odd = _mm_sub_epi64(_mm_xor_si128(w_abs_product_odd, w_neg_mask_odd), w_neg_mask_odd);
const __m128i x_rounded_product_odd = _mm_add_epi64(x_product_odd, vq31rounding);
const __m128i y_rounded_product_odd = _mm_add_epi64(y_product_odd, vq31rounding);
const __m128i z_rounded_product_odd = _mm_add_epi64(z_product_odd, vq31rounding);
const __m128i w_rounded_product_odd = _mm_add_epi64(w_product_odd, vq31rounding);
const __m128i x_q31product_even = _mm_srli_epi64(x_rounded_product_even, 31);
const __m128i x_q31product_odd = _mm_srli_epi64(x_rounded_product_odd, 31);
const __m128i y_q31product_even = _mm_srli_epi64(y_rounded_product_even, 31);
const __m128i y_q31product_odd = _mm_srli_epi64(y_rounded_product_odd, 31);
const __m128i z_q31product_even = _mm_srli_epi64(z_rounded_product_even, 31);
const __m128i z_q31product_odd = _mm_srli_epi64(z_rounded_product_odd, 31);
const __m128i w_q31product_even = _mm_srli_epi64(w_rounded_product_even, 31);
const __m128i w_q31product_odd = _mm_srli_epi64(w_rounded_product_odd, 31);
const __m128i x_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(x_q31product_even), _mm_castsi128_ps(x_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(y_q31product_even), _mm_castsi128_ps(y_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(z_q31product_even), _mm_castsi128_ps(z_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_q31product_0213 = _mm_castps_si128(_mm_shuffle_ps(
_mm_castsi128_ps(w_q31product_even), _mm_castsi128_ps(w_q31product_odd), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_q31product = _mm_shuffle_epi32(x_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_q31product = _mm_shuffle_epi32(y_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_q31product = _mm_shuffle_epi32(z_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_q31product = _mm_shuffle_epi32(w_q31product_0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_remainder =
_mm_add_epi32(_mm_and_si128(x_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), x_q31product));
const __m128i y_remainder =
_mm_add_epi32(_mm_and_si128(y_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), y_q31product));
const __m128i z_remainder =
_mm_add_epi32(_mm_and_si128(z_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), z_q31product));
const __m128i w_remainder =
_mm_add_epi32(_mm_and_si128(w_q31product, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), w_q31product));
const __m128i x_scaled =
_mm_sub_epi32(_mm_sra_epi32(x_q31product, vshift), _mm_cmpgt_epi32(x_remainder, vthreshold));
const __m128i y_scaled =
_mm_sub_epi32(_mm_sra_epi32(y_q31product, vshift), _mm_cmpgt_epi32(y_remainder, vthreshold));
const __m128i z_scaled =
_mm_sub_epi32(_mm_sra_epi32(z_q31product, vshift), _mm_cmpgt_epi32(z_remainder, vthreshold));
const __m128i w_scaled =
_mm_sub_epi32(_mm_sra_epi32(w_q31product, vshift), _mm_cmpgt_epi32(w_remainder, vthreshold));
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 16x PSHUFD
// 4x SHUFPS
// 8x PMULUDQ
// 8x PXOR (setzero)
// 8x PXOR
// 4x PAND
// 8x PADDQ
// 4x PADDD
// 2x PADDW
// 8x PSUBQ
// 4x PSUBD
// 8x PSRLQ (immediate)
// 4x PSRAD (register)
// 12x PCMPGTD
// 4x PABSD
// 2x PACKSSDW
// 1x PACKUSWB
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 107 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
| 9,554
| 49.824468
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-requantization/qu8-requantization-gemmlowp-wasmsimd.c
|
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <wasm_simd128.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_gemmlowp__wasmsimd(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
// Compute requantization parameters.
const uint32_t scale_bits = float_as_uint32(scale);
// Multiplier is in [0x40000000, 0x7FFFFF80] range.
const int32_t multiplier = (int32_t) (((scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000)) << 7);
assert(multiplier >= INT32_C(0x40000000));
assert(multiplier <= INT32_C(0x7FFFFF80));
const int64_t twice_multiplier = INT64_C(2) * (int64_t) multiplier;
// Shift is in [0, 31] range.
const int32_t shift = 127 + 31 - 32 - (float_as_uint32(scale) >> 23);
assert(shift >= 0);
assert(shift < 32);
const v128_t vmultiplier = wasm_i64x2_make(twice_multiplier, twice_multiplier);
const v128_t vzero_point = wasm_i16x8_splat((int16_t) (uint16_t) zero_point);
const v128_t vqmin = wasm_i8x16_splat((int8_t) qmin);
const v128_t vqmax = wasm_i8x16_splat((int8_t) qmax);
const uint32_t remainder_mask = (UINT32_C(1) << shift) - UINT32_C(1);
const v128_t vremainder_mask = wasm_i32x4_splat((int32_t) remainder_mask);
const v128_t vthreshold = wasm_i32x4_splat((int32_t) (remainder_mask >> 1));
const v128_t vtwice_q31rounding = wasm_i64x2_splat(INT64_C(0x80000000));
for (; n != 0; n -= 16) {
const v128_t x = wasm_v128_load(input);
const v128_t y = wasm_v128_load(input + 4);
const v128_t z = wasm_v128_load(input + 8);
const v128_t w = wasm_v128_load(input + 12);
input += 16;
const v128_t x_sign = wasm_i32x4_shr(x, 31);
const v128_t y_sign = wasm_i32x4_shr(y, 31);
const v128_t z_sign = wasm_i32x4_shr(z, 31);
const v128_t w_sign = wasm_i32x4_shr(w, 31);
const v128_t x_lo = wasm_v32x4_shuffle(x, x_sign, 0, 4, 1, 5);
const v128_t y_lo = wasm_v32x4_shuffle(y, y_sign, 0, 4, 1, 5);
const v128_t z_lo = wasm_v32x4_shuffle(z, z_sign, 0, 4, 1, 5);
const v128_t w_lo = wasm_v32x4_shuffle(w, w_sign, 0, 4, 1, 5);
const v128_t x_hi = wasm_v32x4_shuffle(x, x_sign, 2, 6, 3, 7);
const v128_t y_hi = wasm_v32x4_shuffle(y, y_sign, 2, 6, 3, 7);
const v128_t z_hi = wasm_v32x4_shuffle(z, z_sign, 2, 6, 3, 7);
const v128_t w_hi = wasm_v32x4_shuffle(w, w_sign, 2, 6, 3, 7);
const v128_t x_product_lo = wasm_i64x2_add(wasm_i64x2_mul(x_lo, vmultiplier), vtwice_q31rounding);
const v128_t y_product_lo = wasm_i64x2_add(wasm_i64x2_mul(y_lo, vmultiplier), vtwice_q31rounding);
const v128_t z_product_lo = wasm_i64x2_add(wasm_i64x2_mul(z_lo, vmultiplier), vtwice_q31rounding);
const v128_t w_product_lo = wasm_i64x2_add(wasm_i64x2_mul(w_lo, vmultiplier), vtwice_q31rounding);
const v128_t x_product_hi = wasm_i64x2_add(wasm_i64x2_mul(x_hi, vmultiplier), vtwice_q31rounding);
const v128_t y_product_hi = wasm_i64x2_add(wasm_i64x2_mul(y_hi, vmultiplier), vtwice_q31rounding);
const v128_t z_product_hi = wasm_i64x2_add(wasm_i64x2_mul(z_hi, vmultiplier), vtwice_q31rounding);
const v128_t w_product_hi = wasm_i64x2_add(wasm_i64x2_mul(w_hi, vmultiplier), vtwice_q31rounding);
const v128_t x_q31product = wasm_v32x4_shuffle(x_product_lo, x_product_hi, 1, 3, 5, 7);
const v128_t y_q31product = wasm_v32x4_shuffle(y_product_lo, y_product_hi, 1, 3, 5, 7);
const v128_t z_q31product = wasm_v32x4_shuffle(z_product_lo, z_product_hi, 1, 3, 5, 7);
const v128_t w_q31product = wasm_v32x4_shuffle(w_product_lo, w_product_hi, 1, 3, 5, 7);
const v128_t x_remainder =
wasm_i32x4_add(wasm_v128_and(x_q31product, vremainder_mask), wasm_i32x4_shr(x_q31product, 31));
const v128_t y_remainder =
wasm_i32x4_add(wasm_v128_and(y_q31product, vremainder_mask), wasm_i32x4_shr(y_q31product, 31));
const v128_t z_remainder =
wasm_i32x4_add(wasm_v128_and(z_q31product, vremainder_mask), wasm_i32x4_shr(z_q31product, 31));
const v128_t w_remainder =
wasm_i32x4_add(wasm_v128_and(w_q31product, vremainder_mask), wasm_i32x4_shr(w_q31product, 31));
const v128_t x_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(x_q31product, shift), wasm_i32x4_gt(x_remainder, vthreshold));
const v128_t y_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(y_q31product, shift), wasm_i32x4_gt(y_remainder, vthreshold));
const v128_t z_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(z_q31product, shift), wasm_i32x4_gt(z_remainder, vthreshold));
const v128_t w_scaled =
wasm_i32x4_sub(wasm_i32x4_shr(w_q31product, shift), wasm_i32x4_gt(w_remainder, vthreshold));
const v128_t xy_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(x_scaled, y_scaled), vzero_point);
const v128_t zw_packed = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(z_scaled, w_scaled), vzero_point);
const v128_t xyzw_packed = wasm_u8x16_narrow_i16x8(xy_packed, zw_packed);
const v128_t xyzw_clamped = wasm_u8x16_min(wasm_u8x16_max(xyzw_packed, vqmin), vqmax);
// 12x v128.shuffle
// 12x i32x4.shr_s
// 8x i64x2.add
// 8x i64x2.mul
// 4x v128.and
// 4x i32x4.add
// 4x i32x4.sub
// 4x i32x4.gt
// 2x i16x8.narrow_i32x4_s
// 2x i16x8.add_saturate_s
// 1x i8x16.narrow_i16x8_u
// 1x i8x16.max_u
// 1x i8x16.min_u
// ---------------------
// 63 instructions total
wasm_v128_store(output, xyzw_clamped);
output += 16;
}
}
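// --- Illustrative addendum (not part of the XNNPACK source) ---
// Scalar model of the twice-multiplier trick above (hypothetical helper).
// WAsm SIMD has no 64-bit shift-with-rounding, so the kernel multiplies by
// 2*multiplier and adds 2**31: the Q31 result then occupies the upper 32 bits
// of each 64-bit lane, and one v128.shuffle (lanes 1, 3, 5, 7) extracts it.
static inline int32_t q31_product_via_high_half(int32_t x, int32_t multiplier) {
  const int64_t twice_multiplier = INT64_C(2) * (int64_t) multiplier;
  const int64_t product = (int64_t) x * twice_multiplier + INT64_C(0x80000000);
  return (int32_t) (product >> 32);  // == ((int64_t) x * multiplier + 0x40000000) >> 31
}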
| 5,863 | 42.117647 | 107 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-neon.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <arm_neon.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__neon(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const int32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
#if defined(__aarch64__)
const int32x4_t vmultiplier = vdupq_n_s32(multiplier);
#else
const int32x2_t vmultiplier = vdup_n_s32(multiplier);
#endif
const int16x8_t vzero_point = vdupq_n_s16((int16_t)(uint16_t) zero_point);
const int64x2_t vshift = vdupq_n_s64(-shift);
const uint8x16_t vqmin = vdupq_n_u8(qmin);
const uint8x16_t vqmax = vdupq_n_u8(qmax);
for (; n != 0; n -= 16) {
const int32x4_t x = vld1q_s32(input);
const int32x4_t y = vld1q_s32(input + 4);
const int32x4_t z = vld1q_s32(input + 8);
const int32x4_t w = vld1q_s32(input + 12);
input += 16;
const uint32x4_t x_neg_mask = vcltq_s32(x, vmovq_n_s32(0));
const uint32x4_t y_neg_mask = vcltq_s32(y, vmovq_n_s32(0));
const uint32x4_t z_neg_mask = vcltq_s32(z, vmovq_n_s32(0));
const uint32x4_t w_neg_mask = vcltq_s32(w, vmovq_n_s32(0));
#if defined(__aarch64__)
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vget_low_s32(vmultiplier));
const int64x2_t x23_product = vmull_high_s32(x, vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vget_low_s32(vmultiplier));
const int64x2_t y23_product = vmull_high_s32(y, vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vget_low_s32(vmultiplier));
const int64x2_t z23_product = vmull_high_s32(z, vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vget_low_s32(vmultiplier));
const int64x2_t w23_product = vmull_high_s32(w, vmultiplier);
#else
const int64x2_t x01_product = vmull_s32(vget_low_s32(x), vmultiplier);
const int64x2_t x23_product = vmull_s32(vget_high_s32(x), vmultiplier);
const int64x2_t y01_product = vmull_s32(vget_low_s32(y), vmultiplier);
const int64x2_t y23_product = vmull_s32(vget_high_s32(y), vmultiplier);
const int64x2_t z01_product = vmull_s32(vget_low_s32(z), vmultiplier);
const int64x2_t z23_product = vmull_s32(vget_high_s32(z), vmultiplier);
const int64x2_t w01_product = vmull_s32(vget_low_s32(w), vmultiplier);
const int64x2_t w23_product = vmull_s32(vget_high_s32(w), vmultiplier);
#endif
#if defined(__aarch64__)
const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
const int64x2_t x23_adjusted_product = vaddw_high_s32(x23_product, vreinterpretq_s32_u32(x_neg_mask));
const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
const int64x2_t y23_adjusted_product = vaddw_high_s32(y23_product, vreinterpretq_s32_u32(y_neg_mask));
const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
const int64x2_t z23_adjusted_product = vaddw_high_s32(z23_product, vreinterpretq_s32_u32(z_neg_mask));
const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
const int64x2_t w23_adjusted_product = vaddw_high_s32(w23_product, vreinterpretq_s32_u32(w_neg_mask));
#else
const int64x2_t x01_adjusted_product = vaddw_s32(x01_product, vreinterpret_s32_u32(vget_low_u32(x_neg_mask)));
const int64x2_t x23_adjusted_product = vaddw_s32(x23_product, vreinterpret_s32_u32(vget_high_u32(x_neg_mask)));
const int64x2_t y01_adjusted_product = vaddw_s32(y01_product, vreinterpret_s32_u32(vget_low_u32(y_neg_mask)));
const int64x2_t y23_adjusted_product = vaddw_s32(y23_product, vreinterpret_s32_u32(vget_high_u32(y_neg_mask)));
const int64x2_t z01_adjusted_product = vaddw_s32(z01_product, vreinterpret_s32_u32(vget_low_u32(z_neg_mask)));
const int64x2_t z23_adjusted_product = vaddw_s32(z23_product, vreinterpret_s32_u32(vget_high_u32(z_neg_mask)));
const int64x2_t w01_adjusted_product = vaddw_s32(w01_product, vreinterpret_s32_u32(vget_low_u32(w_neg_mask)));
const int64x2_t w23_adjusted_product = vaddw_s32(w23_product, vreinterpret_s32_u32(vget_high_u32(w_neg_mask)));
#endif
const int64x2_t x01_scaled = vrshlq_s64(x01_adjusted_product, vshift);
const int64x2_t x23_scaled = vrshlq_s64(x23_adjusted_product, vshift);
const int64x2_t y01_scaled = vrshlq_s64(y01_adjusted_product, vshift);
const int64x2_t y23_scaled = vrshlq_s64(y23_adjusted_product, vshift);
const int64x2_t z01_scaled = vrshlq_s64(z01_adjusted_product, vshift);
const int64x2_t z23_scaled = vrshlq_s64(z23_adjusted_product, vshift);
const int64x2_t w01_scaled = vrshlq_s64(w01_adjusted_product, vshift);
const int64x2_t w23_scaled = vrshlq_s64(w23_adjusted_product, vshift);
#ifdef __aarch64__
const int32x4_t x_scaled = vuzp1q_s32(vreinterpretq_s32_s64(x01_scaled), vreinterpretq_s32_s64(x23_scaled));
const int32x4_t y_scaled = vuzp1q_s32(vreinterpretq_s32_s64(y01_scaled), vreinterpretq_s32_s64(y23_scaled));
const int32x4_t z_scaled = vuzp1q_s32(vreinterpretq_s32_s64(z01_scaled), vreinterpretq_s32_s64(z23_scaled));
const int32x4_t w_scaled = vuzp1q_s32(vreinterpretq_s32_s64(w01_scaled), vreinterpretq_s32_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(x_scaled), y_scaled), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(z_scaled), w_scaled), vzero_point);
const uint8x16_t xyzw_packed = vqmovun_high_s16(vqmovun_s16(xy_packed), zw_packed);
#else
const int32x4_t x_scaled = vcombine_s32(vmovn_s64(x01_scaled), vmovn_s64(x23_scaled));
const int32x4_t y_scaled = vcombine_s32(vmovn_s64(y01_scaled), vmovn_s64(y23_scaled));
const int32x4_t z_scaled = vcombine_s32(vmovn_s64(z01_scaled), vmovn_s64(z23_scaled));
const int32x4_t w_scaled = vcombine_s32(vmovn_s64(w01_scaled), vmovn_s64(w23_scaled));
const int16x8_t xy_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(x_scaled), vqmovn_s32(y_scaled)), vzero_point);
const int16x8_t zw_packed = vqaddq_s16(vcombine_s16(vqmovn_s32(z_scaled), vqmovn_s32(w_scaled)), vzero_point);
const uint8x16_t xyzw_packed = vcombine_u8(vqmovun_s16(xy_packed), vqmovun_s16(zw_packed));
#endif
const uint8x16_t xyzw_clamped = vmaxq_u8(vminq_u8(xyzw_packed, vqmax), vqmin);
// AArch32 version:
// 4x VCLT.S32 Qd, Qm, #0
// 8x VMULL.S32 Qd, Dm, Dn
// 8x VADDW.S32 Qd, Qm, Dn
// 8x VRSHL.S64 Qd, Qm, Qn
// 8x VMOVN.S64 Dd, Qm
// 4x VQMOVN.S32 Dd, Qm
// 2x VQADD.S16 Qd, Qm, Qn
// 2x VQMOVUN.S16 Dd, Qm
// 1x VMAX.U8 Qd, Qm, Qn
// 1x VMIN.U8 Qd, Qm, Qn
// ---------------------
// 46 instructions total
//
// AArch64 version:
// 4x CMLT Vd.4S, Vn.4S, #0
// 4x SMULL Vd.2D, Vn.2S, Vm.2S
// 4x SMULL2 Vd.2D, Vn.4S, Vm.4S
// 4x SADDW Vd.2D, Vn.2D, Vm.2S
// 4x SADDW2 Vd.2D, Vn.2D, Vm.4S
// 8x SRSHL Vd.2D, Vn.2D, Vm.2D
// 4x UZP1 Vd.4S, Vn.4S, Vm.4S
// 2x SQXTN Vd.4H, Vn.4S
// 2x SQXTN2 Vd.8H, Vn.4S
// 2x SQADD Vd.8H, Vn.8H, Vm.8H
// 1x SQXTUN Vd.8B, Vn.8H
// 1x SQXTUN2 Vd.16B, Vn.8H
// 1x UMIN Vd.16B, Vn.16B, Vm.16B
// 1x UMAX Vd.16B, Vn.16B, Vm.16B
// ---------------------
// 42 instructions total
vst1q_u8(output, xyzw_clamped);
output += 16;
}
}
| 8,090 | 47.740964 | 115 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-scalar-signed64.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__scalar_signed64(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const int32_t multiplier = ((int32_t) scale_bits & INT32_C(0x007FFFFF)) | INT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const int64_t rounding = INT64_C(1) << (shift - 1);
const int32_t smin = (int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point;
const int32_t smax = (int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute full 64-bit product of signed 32-bit factors.
//
// Note: multiplier can be treated as either signed or unsigned.
const int64_t x_product = (int64_t) x * (int64_t) multiplier;
const int64_t y_product = (int64_t) y * (int64_t) multiplier;
const int64_t z_product = (int64_t) z * (int64_t) multiplier;
const int64_t w_product = (int64_t) w * (int64_t) multiplier;
// Adjust product before subsequent shift with rounding up to simulate shift with rounding away from zero.
const int64_t x_adjusted_product = x_product - (int64_t) (x < 0);
const int64_t y_adjusted_product = y_product - (int64_t) (y < 0);
const int64_t z_adjusted_product = z_product - (int64_t) (z < 0);
const int64_t w_adjusted_product = w_product - (int64_t) (w < 0);
// Arithmetically shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up.
//
// Note that although rounding is precomputed, it is dependent on shift value, and on processors with 64-bit
// "right shift with rounding" instruction each line below can be represented by just one such instruction
// (e.g. VRSHL.S64 on ARM NEON, SRSHL in ARM64 Advanced SIMD).
const int32_t x_scaled = (int32_t) math_asr_s64(x_adjusted_product + rounding, shift);
const int32_t y_scaled = (int32_t) math_asr_s64(y_adjusted_product + rounding, shift);
const int32_t z_scaled = (int32_t) math_asr_s64(z_adjusted_product + rounding, shift);
const int32_t w_scaled = (int32_t) math_asr_s64(w_adjusted_product + rounding, shift);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 255) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
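// --- Illustrative addendum (not part of the XNNPACK source) ---
// Worked example of the multiplier/shift decomposition above. For scale = 0.1f,
// float_as_uint32(0.1f) == 0x3DCCCCCD, so the biased exponent is 123 and
//   multiplier = 0x4CCCCD | 0x800000 = 13421773,  shift = 127 + 23 - 123 = 27,
//   multiplier * 2**-27 = 13421773 / 134217728 = 0.10000000149... ~= scale.
// Standalone demo (assumes >> on a negative int64_t is an arithmetic shift):
#include <stdint.h>
#include <stdio.h>
int main(void) {
  const int32_t multiplier = INT32_C(13421773);
  const uint32_t shift = 27;
  const int64_t rounding = INT64_C(1) << (shift - 1);
  const int32_t x = -12345;  // x * 0.1 = -1234.5, a midpoint: rounds away from zero
  const int64_t adjusted_product = (int64_t) x * multiplier - (int64_t) (x < 0);
  const int32_t scaled = (int32_t) ((adjusted_product + rounding) >> shift);
  printf("%d * 0.1f requantizes to %d\n", x, scaled);  // prints -1235
  return 0;
}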
| 4,020 | 41.776596 | 115 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-scalar-unsigned32.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__scalar_unsigned32(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
const uint32_t shift = 127 + 31 - (scale_bits >> 23);
assert(shift >= 32);
assert(shift < 64);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const uint32_t rounding_hi = (uint32_t) (rounding >> 32);
const uint32_t rounding_lo = (uint32_t) rounding;
const uint32_t shift_minus_32 = shift - 32;
const int32_t smin = (int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point;
const int32_t smax = (int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute absolute value of input as unsigned 32-bit int.
// All further computations will work with unsigned values to avoid undefined behaviour on signed operations.
const uint32_t x_abs = (x >= 0) ? (uint32_t) x : -(uint32_t) x;
const uint32_t y_abs = (y >= 0) ? (uint32_t) y : -(uint32_t) y;
const uint32_t z_abs = (z >= 0) ? (uint32_t) z : -(uint32_t) z;
const uint32_t w_abs = (w >= 0) ? (uint32_t) w : -(uint32_t) w;
// Compute full 64-bit product of 32-bit factors.
const uint64_t x_product = (uint64_t) x_abs * (uint64_t) multiplier;
const uint64_t y_product = (uint64_t) y_abs * (uint64_t) multiplier;
const uint64_t z_product = (uint64_t) z_abs * (uint64_t) multiplier;
const uint64_t w_product = (uint64_t) w_abs * (uint64_t) multiplier;
// Shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up (same as away from zero).
//
// Generally, this operation requires both 64-bit addition and 64-bit shift, but we use two tricks to replace
// 64-bit operations with 32-bit operations.
//
// To avoid full 64-bit addition we make use of three facts:
// - The 64-bit rounding value added before the shift is a power of 2, and thus has only one bit set.
// - When 0x1.0p-1f <= scale < 1.0f, the shift is exactly 32, so the non-zero bit of rounding is in the low
//   32 bits, and rounding is exactly 0x80000000 (2**31), because rounding is 2**(shift-1) and shift >= 32.
//   In this case, addition of rounding can affect the high 32 bits of the product only through overflow, which
//   happens if the low 32-bit part of the product equals or exceeds 0x80000000. We can reformulate the latter
//   condition as the low 32-bit part of the product having bit 31 set; overflow then happens if both the low
//   32-bit part of the product and the low 32-bit part of the rounding value have bit 31 set. Since 32-bit
//   numbers with bit 31 set are negative when interpreted as signed integers, we can check the overflow
//   condition as
//   (int32_t) (LOW(product) & LOW(rounding)) < 0
// - When 0x1.0p-32f <= scale < 0x1.0p-1f, the shift exceeds 32, so the non-zero bit is in the high 32 bits of
//   rounding. We just need to do a 32-bit addition of the high 32 bits of rounding and the high 32 bits of the
//   product. This addition never overflows because product <= 0x80000000 * 0xFFFFFF00 < 2**63 and
//   rounding = 2**(shift-1) <= 2**62.
//
// To avoid a full 64-bit shift, we leverage the fact that shift >= 32, and do it in two steps:
// - Shift by 32, which can be implemented by extracting the high 32-bit word on 32-bit systems.
// - Shift by (shift - 32), which can be implemented as a 32-bit shift of the high word of the addition result.
const uint32_t x_carry_lo = (uint32_t) ((int32_t)((uint32_t) x_product & rounding_lo) < 0);
const uint32_t y_carry_lo = (uint32_t) ((int32_t)((uint32_t) y_product & rounding_lo) < 0);
const uint32_t z_carry_lo = (uint32_t) ((int32_t)((uint32_t) z_product & rounding_lo) < 0);
const uint32_t w_carry_lo = (uint32_t) ((int32_t)((uint32_t) w_product & rounding_lo) < 0);
const uint32_t x_product_hi = (uint32_t) (x_product >> 32);
const uint32_t y_product_hi = (uint32_t) (y_product >> 32);
const uint32_t z_product_hi = (uint32_t) (z_product >> 32);
const uint32_t w_product_hi = (uint32_t) (w_product >> 32);
const uint32_t x_abs_scaled = (uint32_t) (x_product_hi + rounding_hi + x_carry_lo) >> shift_minus_32;
const uint32_t y_abs_scaled = (uint32_t) (y_product_hi + rounding_hi + y_carry_lo) >> shift_minus_32;
const uint32_t z_abs_scaled = (uint32_t) (z_product_hi + rounding_hi + z_carry_lo) >> shift_minus_32;
const uint32_t w_abs_scaled = (uint32_t) (w_product_hi + rounding_hi + w_carry_lo) >> shift_minus_32;
// Copy the sign of input to scaled absolute input value.
const int32_t x_scaled = (int32_t) (x >= 0 ? x_abs_scaled : -x_abs_scaled);
const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled);
const int32_t z_scaled = (int32_t) (z >= 0 ? z_abs_scaled : -z_abs_scaled);
const int32_t w_scaled = (int32_t) (w >= 0 ? w_abs_scaled : -w_abs_scaled);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 255) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
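// --- Illustrative addendum (not part of the XNNPACK source) ---
// The 32-bit carry trick above, restated as a pair of hypothetical helpers:
// both compute (product + rounding) >> shift for 32 <= shift < 64, but the
// second performs no 64-bit additions or shifts after the widening multiply,
// matching what the kernel does on 32-bit targets.
static inline uint32_t rshift_round_ref(uint64_t product, uint32_t shift) {
  return (uint32_t) ((product + (UINT64_C(1) << (shift - 1))) >> shift);
}
static inline uint32_t rshift_round_32bit(uint64_t product, uint32_t shift) {
  const uint64_t rounding = UINT64_C(1) << (shift - 1);
  const uint32_t rounding_hi = (uint32_t) (rounding >> 32);
  const uint32_t rounding_lo = (uint32_t) rounding;
  // A carry out of the low word is possible only when shift == 32, and then
  // only if bit 31 is set in both the low product word and rounding_lo.
  const uint32_t carry_lo = (uint32_t) ((int32_t) ((uint32_t) product & rounding_lo) < 0);
  const uint32_t product_hi = (uint32_t) (product >> 32);
  return (product_hi + rounding_hi + carry_lo) >> (shift - 32);
}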
| 6,784 | 51.596899 | 116 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-scalar-unsigned64.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__scalar_unsigned64(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 4 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const int32_t smin = (int32_t) (uint32_t) qmin - (int32_t) (uint32_t) zero_point;
const int32_t smax = (int32_t) (uint32_t) qmax - (int32_t) (uint32_t) zero_point;
for (; n != 0; n -= 4) {
const int32_t x = input[0];
const int32_t y = input[1];
const int32_t z = input[2];
const int32_t w = input[3];
input += 4;
// Compute absolute value of input as unsigned 32-bit int.
// All further computations will work with unsigned values to avoid undefined behaviour on signed operations.
const uint32_t x_abs = (x >= 0) ? (uint32_t) x : -(uint32_t) x;
const uint32_t y_abs = (y >= 0) ? (uint32_t) y : -(uint32_t) y;
const uint32_t z_abs = (z >= 0) ? (uint32_t) z : -(uint32_t) z;
const uint32_t w_abs = (w >= 0) ? (uint32_t) w : -(uint32_t) w;
// Compute full 64-bit product of 32-bit factors.
const uint64_t x_product = (uint64_t) x_abs * (uint64_t) multiplier;
const uint64_t y_product = (uint64_t) y_abs * (uint64_t) multiplier;
const uint64_t z_product = (uint64_t) z_abs * (uint64_t) multiplier;
const uint64_t w_product = (uint64_t) w_abs * (uint64_t) multiplier;
// Shift the full 64-bit product right with rounding.
// Rounding is performed towards closest integer, with midpoints rounded up (same as away from zero).
//
// Note that although rounding is precomputed, it is dependent on shift value, and on processors with 64-bit
// "right shift with rounding" instruction each line below can be represented by just one such instruction
// (e.g. VRSHL.U64 on ARM NEON, URSHL in ARM64 Advanced SIMD).
const uint32_t x_abs_scaled = (uint32_t) ((x_product + rounding) >> shift);
const uint32_t y_abs_scaled = (uint32_t) ((y_product + rounding) >> shift);
const uint32_t z_abs_scaled = (uint32_t) ((z_product + rounding) >> shift);
const uint32_t w_abs_scaled = (uint32_t) ((w_product + rounding) >> shift);
// Copy the sign of input to scaled absolute input value.
//
// On x86 processors with SSSE3 instruction set, this operation nicely maps to PSIGND instruction.
const int32_t x_scaled = (int32_t) (x >= 0 ? x_abs_scaled : -x_abs_scaled);
const int32_t y_scaled = (int32_t) (y >= 0 ? y_abs_scaled : -y_abs_scaled);
const int32_t z_scaled = (int32_t) (z >= 0 ? z_abs_scaled : -z_abs_scaled);
const int32_t w_scaled = (int32_t) (w >= 0 ? w_abs_scaled : -w_abs_scaled);
// Clamp scaled value with zero point between (qmin - zero point) and (qmax - zero point).
const int32_t x_clamped = math_min_s32(math_max_s32(x_scaled, smin), smax);
const int32_t y_clamped = math_min_s32(math_max_s32(y_scaled, smin), smax);
const int32_t z_clamped = math_min_s32(math_max_s32(z_scaled, smin), smax);
const int32_t w_clamped = math_min_s32(math_max_s32(w_scaled, smin), smax);
// Add zero point to clamped value.
// The result is guaranteed to be in [qmin, qmax] range.
//
// This addition can not be safely done before clamping, because scaled values are in [-2147483520, 2147483519]
// range, so addition of zero point (which can be up to 255) can overflow signed 32-bit integer.
const int32_t x_biased = x_clamped + zero_point;
const int32_t y_biased = y_clamped + zero_point;
const int32_t z_biased = z_clamped + zero_point;
const int32_t w_biased = w_clamped + zero_point;
output[0] = (uint8_t) x_biased;
output[1] = (uint8_t) y_biased;
output[2] = (uint8_t) z_biased;
output[3] = (uint8_t) w_biased;
output += 4;
}
}
| 4,479 | 43.356436 | 115 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-sse2.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <emmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__sse2(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), x);
const __m128i y_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), y);
const __m128i z_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), z);
const __m128i w_neg_mask = _mm_cmpgt_epi32(_mm_setzero_si128(), w);
const __m128i x_abs0123 = _mm_sub_epi32(_mm_xor_si128(x, x_neg_mask), x_neg_mask);
const __m128i y_abs0123 = _mm_sub_epi32(_mm_xor_si128(y, y_neg_mask), y_neg_mask);
const __m128i z_abs0123 = _mm_sub_epi32(_mm_xor_si128(z, z_neg_mask), z_neg_mask);
const __m128i w_abs0123 = _mm_sub_epi32(_mm_xor_si128(w, w_neg_mask), w_neg_mask);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
const __m128i x_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
const __m128i y_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
const __m128i z_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
const __m128i w_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
const __m128i x_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(x_abs_scaled02), _mm_castsi128_ps(x_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(y_abs_scaled02), _mm_castsi128_ps(y_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(z_abs_scaled02), _mm_castsi128_ps(z_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(w_abs_scaled02), _mm_castsi128_ps(w_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_abs_scaled = _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_abs_scaled = _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_abs_scaled = _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_abs_scaled = _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_scaled = _mm_sub_epi32(_mm_xor_si128(x_abs_scaled, x_neg_mask), x_neg_mask);
const __m128i y_scaled = _mm_sub_epi32(_mm_xor_si128(y_abs_scaled, y_neg_mask), y_neg_mask);
const __m128i z_scaled = _mm_sub_epi32(_mm_xor_si128(z_abs_scaled, z_neg_mask), z_neg_mask);
const __m128i w_scaled = _mm_sub_epi32(_mm_xor_si128(w_abs_scaled, w_neg_mask), w_neg_mask);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 4x PXOR (setzero)
// 8x PSUBD
// 8x PXOR
// 8x PSHUFD
// 8x PMULUDQ
// 8x PSRLQ
// 8x PADDQ
// 4x SHUFPS
// 2x PACKSSDW
// 1x PACKUSWB
// 2x PADDW
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 63 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
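// --- Illustrative addendum (not part of the XNNPACK source) ---
// The PABSD-free absolute value above uses the two's-complement identity
// (x ^ m) - m, where m is an all-ones mask for negative x (from PCMPGTD) and
// zero otherwise; the same mask restores the sign after scaling. One-lane
// scalar model (hypothetical helper):
static inline uint32_t abs_via_mask(int32_t x) {
  const uint32_t m = (uint32_t) -(int32_t) (x < 0);  // all ones if x < 0, else 0
  return ((uint32_t) x ^ m) - m;  // flips all bits and adds 1 (negates) iff m is set
}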
| 6,117 | 46.061538 | 117 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-sse41.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <smmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__sse41(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits << 8) | UINT32_C(0x80000000);
const uint32_t shift = 127 + 31 - (scale_bits >> 23);
assert(shift >= 32);
assert(shift < 64);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshiftlo = _mm_cvtsi32_si128((int) shift);
const __m128i vshifthi = _mm_cvtsi32_si128((int) shift - 32);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs0123 = _mm_abs_epi32(x);
const __m128i y_abs0123 = _mm_abs_epi32(y);
const __m128i z_abs0123 = _mm_abs_epi32(z);
const __m128i w_abs0123 = _mm_abs_epi32(w);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshiftlo);
const __m128i x_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(x_absmul13, vrounding), vshifthi);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshiftlo);
const __m128i y_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(y_absmul13, vrounding), vshifthi);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshiftlo);
const __m128i z_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(z_absmul13, vrounding), vshifthi);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshiftlo);
const __m128i w_abs_scaled13 = _mm_srl_epi32(_mm_add_epi64(w_absmul13, vrounding), vshifthi);
const __m128i x_abs_scaled = _mm_blend_epi16(x_abs_scaled02, x_abs_scaled13, 0xCC);
const __m128i y_abs_scaled = _mm_blend_epi16(y_abs_scaled02, y_abs_scaled13, 0xCC);
const __m128i z_abs_scaled = _mm_blend_epi16(z_abs_scaled02, z_abs_scaled13, 0xCC);
const __m128i w_abs_scaled = _mm_blend_epi16(w_abs_scaled02, w_abs_scaled13, 0xCC);
const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 4x PABSD
// 4x PSHUFD
// 8x PMULUDQ
// 4x PSRLQ
// 4x PSRLD
// 8x PADDQ
// 4x PBLENDW
// 4x PSIGND
// 2x PACKSSDW
// 1x PACKUSWB
// 2x PADDW
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 47 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
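// --- Illustrative addendum (not part of the XNNPACK source) ---
// Why the odd lanes above get away with PSRLD: since shift >= 32, a 64-bit
// rounded sum v satisfies v >> shift == (v >> 32) >> (shift - 32), and after
// the full-width PADDQ the high dword already holds every bit the result
// needs. PSRLD shifts that dword in place in the odd 32-bit slot, exactly
// where PBLENDW with mask 0xCC picks it up. Scalar model (hypothetical
// helper, valid for 32 <= shift < 64):
static inline uint32_t srl64_via_srl32(uint64_t v, uint32_t shift) {
  return (uint32_t) (v >> 32) >> (shift - 32);
}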
| 4,870 | 40.632479 | 97 | c | XNNPACK | XNNPACK-master/src/qu8-requantization/qu8-requantization-rndna-ssse3.c |
// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <stdint.h>
#include <stddef.h>
#include <tmmintrin.h>
#include <xnnpack/math.h>
#include <xnnpack/requantization-stubs.h>
void xnn_qu8_requantize_rndna__ssse3(
size_t n,
const int32_t* input,
float scale,
uint8_t zero_point,
uint8_t qmin,
uint8_t qmax,
uint8_t* output)
{
assert(n % 16 == 0);
assert(scale < 1.0f);
assert(scale >= 0x1.0p-32f);
const uint32_t scale_bits = float_as_uint32(scale);
const uint32_t multiplier = (scale_bits & UINT32_C(0x007FFFFF)) | UINT32_C(0x00800000);
const uint32_t shift = 127 + 23 - (scale_bits >> 23);
assert(shift >= 24);
assert(shift < 56);
const uint64_t rounding = UINT64_C(1) << (shift - 1);
const __m128i vmultiplier = _mm_set1_epi32(multiplier);
const __m128i vzero_point = _mm_set1_epi16((short) (uint16_t) zero_point);
const __m128i vqmin = _mm_set1_epi8((char) qmin);
const __m128i vqmax = _mm_set1_epi8((char) qmax);
const __m128i vshift = _mm_cvtsi32_si128((int) shift);
const __m128i vrounding = _mm_set1_epi64x(rounding);
for (; n != 0; n -= 16) {
const __m128i x = _mm_loadu_si128((const __m128i*) input);
const __m128i y = _mm_loadu_si128((const __m128i*) (input + 4));
const __m128i z = _mm_loadu_si128((const __m128i*) (input + 8));
const __m128i w = _mm_loadu_si128((const __m128i*) (input + 12));
input += 16;
const __m128i x_abs0123 = _mm_abs_epi32(x);
const __m128i y_abs0123 = _mm_abs_epi32(y);
const __m128i z_abs0123 = _mm_abs_epi32(z);
const __m128i w_abs0123 = _mm_abs_epi32(w);
const __m128i x_abs1032 = _mm_shuffle_epi32(x_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i y_abs1032 = _mm_shuffle_epi32(y_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i z_abs1032 = _mm_shuffle_epi32(z_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i w_abs1032 = _mm_shuffle_epi32(w_abs0123, _MM_SHUFFLE(2, 3, 0, 1));
const __m128i x_absmul02 = _mm_mul_epu32(x_abs0123, vmultiplier);
const __m128i y_absmul02 = _mm_mul_epu32(y_abs0123, vmultiplier);
const __m128i z_absmul02 = _mm_mul_epu32(z_abs0123, vmultiplier);
const __m128i w_absmul02 = _mm_mul_epu32(w_abs0123, vmultiplier);
const __m128i x_absmul13 = _mm_mul_epu32(x_abs1032, vmultiplier);
const __m128i y_absmul13 = _mm_mul_epu32(y_abs1032, vmultiplier);
const __m128i z_absmul13 = _mm_mul_epu32(z_abs1032, vmultiplier);
const __m128i w_absmul13 = _mm_mul_epu32(w_abs1032, vmultiplier);
const __m128i x_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(x_absmul02, vrounding), vshift);
const __m128i x_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(x_absmul13, vrounding), vshift);
const __m128i y_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(y_absmul02, vrounding), vshift);
const __m128i y_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(y_absmul13, vrounding), vshift);
const __m128i z_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(z_absmul02, vrounding), vshift);
const __m128i z_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(z_absmul13, vrounding), vshift);
const __m128i w_abs_scaled02 = _mm_srl_epi64(_mm_add_epi64(w_absmul02, vrounding), vshift);
const __m128i w_abs_scaled13 = _mm_srl_epi64(_mm_add_epi64(w_absmul13, vrounding), vshift);
const __m128i x_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(x_abs_scaled02), _mm_castsi128_ps(x_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i y_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(y_abs_scaled02), _mm_castsi128_ps(y_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i z_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(z_abs_scaled02), _mm_castsi128_ps(z_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i w_abs_scaled0213 = _mm_castps_si128(
_mm_shuffle_ps(_mm_castsi128_ps(w_abs_scaled02), _mm_castsi128_ps(w_abs_scaled13), _MM_SHUFFLE(2, 0, 2, 0)));
const __m128i x_abs_scaled = _mm_shuffle_epi32(x_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i y_abs_scaled = _mm_shuffle_epi32(y_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i z_abs_scaled = _mm_shuffle_epi32(z_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i w_abs_scaled = _mm_shuffle_epi32(w_abs_scaled0213, _MM_SHUFFLE(3, 1, 2, 0));
const __m128i x_scaled = _mm_sign_epi32(x_abs_scaled, x);
const __m128i y_scaled = _mm_sign_epi32(y_abs_scaled, y);
const __m128i z_scaled = _mm_sign_epi32(z_abs_scaled, z);
const __m128i w_scaled = _mm_sign_epi32(w_abs_scaled, w);
const __m128i xy_packed = _mm_adds_epi16(_mm_packs_epi32(x_scaled, y_scaled), vzero_point);
const __m128i zw_packed = _mm_adds_epi16(_mm_packs_epi32(z_scaled, w_scaled), vzero_point);
const __m128i xyzw_packed = _mm_packus_epi16(xy_packed, zw_packed);
const __m128i xyzw_clamped = _mm_max_epu8(_mm_min_epu8(xyzw_packed, vqmax), vqmin);
// 4x PABSD
// 8x PSHUFD
// 8x PMULUDQ
// 8x PSRLQ
// 8x PADDQ
// 4x SHUFPS
// 4x PSIGND
// 2x PACKSSDW
// 1x PACKUSWB
// 2x PADDW
// 1x PMAXUB
// 1x PMINUB
// ---------------------
// 51 instructions total
_mm_storeu_si128((__m128i*) output, xyzw_clamped);
output += 16;
}
}
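// --- Illustrative addendum (not part of the XNNPACK source) ---
// One-lane model of PSIGND as used above (hypothetical helper). Note the zero
// case: _mm_sign_epi32 zeroes the lane when the sign source is zero, which is
// harmless here because x == 0 already yields
// (0 * multiplier + rounding) >> shift == 2**(shift-1) >> shift == 0.
static inline int32_t sign_epi32_lane(int32_t value, int32_t sign_source) {
  if (sign_source < 0) return (int32_t) (0u - (uint32_t) value);
  if (sign_source == 0) return 0;
  return value;
}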
| 5,510 | 43.443548 | 117 | c | XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-avx-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
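// --- Illustrative addendum (not part of the XNNPACK source) ---
// Scalar model of the mul16 trick above (hypothetical helper): the 32-bit
// product is rebuilt from 16-bit multiplies, exact modulo 2**32, which is all
// the int32 accumulator needs as long as the parameter setup keeps the true
// product plus bias within int32 range.
static inline uint32_t mul32_via_mul16(uint8_t a, uint32_t multiplier) {
  const uint32_t mlo = multiplier & 0xFFFF;
  const uint32_t mhi = multiplier >> 16;
  const uint16_t prod_lo = (uint16_t) (a * mlo);         // PMULLW
  const uint16_t prod_hi = (uint16_t) ((a * mlo) >> 16)  // PMULHUW
                         + (uint16_t) (a * mhi);         // PMULLW
  // PUNPCKLWD/PUNPCKHWD interleave the 16-bit halves into 32-bit lanes:
  return (uint32_t) prod_lo | ((uint32_t) prod_hi << 16);
}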
| 5,427 | 42.079365 | 114 | c | XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-avx-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__avx_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,369 | 39.091743 | 114 | c | XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-avx-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
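/*
 * Note (illustrative sketch, not part of the generated kernels): every
 * qu8-vaddc variant in this directory implements the same fixed-point recipe
 * per element: widen the input byte, scale by a_multiplier, add a bias that
 * already folds in the broadcast operand *input_b, arithmetic-shift right,
 * re-center on the output zero point, and clamp. A minimal scalar model of
 * that recipe follows; all names here are illustrative, not XNNPACK API.
 */
#include <stdint.h>

static inline int32_t asr_s32_ref(int32_t x, uint32_t n) {
  return x >= 0 ? (int32_t) ((uint32_t) x >> n) : ~(~x >> n);  /* portable arithmetic shift right */
}

static inline uint8_t qu8_vaddc_ref(
    uint8_t a, int32_t bias, int32_t a_multiplier, uint32_t shift,
    int32_t out_min_less_zp, int32_t out_max_less_zp, int32_t out_zp)
{
  int32_t acc = bias + (int32_t) a * a_multiplier;  /* bias already contains *input_b * b_multiplier */
  int32_t out = asr_s32_ref(acc, shift);            /* requantize */
  out = out < out_min_less_zp ? out_min_less_zp : out;
  out = out > out_max_less_zp ? out_max_less_zp : out;
  return (uint8_t) (out + out_zp);                  /* re-center and narrow to u8 */
}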
| 4,859 | 42.392857 | 107 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-avx-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__avx_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
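/*
 * Note (not part of the generated file): the x8 variant above carries
 * XNN_OOB_READS because its remainder path still issues two 4-byte loads
 * (8 bytes total) even when fewer than 8 elements are left; only `batch`
 * bytes are ever written back. The 4/2/1 store ladder covers every remainder
 * r in 1..7 by writing r's binary digits: 4 bytes if bit 2 is set, then 2,
 * then 1, shifting the vector right after each partial store so the next
 * lane group lands in position 0.
 */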
| 3,981 | 40.051546 | 107 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-neon-ld128-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__neon_ld128_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
#if XNN_ARCH_ARM64
    const uint8x16_t va_zero_point = vld1q_dup_u8(&params->neon.a_zero_point);
  #else
    const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
  #endif
  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t va0123456789ABCDEF = vld1q_u8(input_a); input_a += 16;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), vget_low_u8(va_zero_point)));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_high_u8(va0123456789ABCDEF, va_zero_point));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(va0123456789ABCDEF), va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(va0123456789ABCDEF), va_zero_point));
#endif // XNN_ARCH_ARM64
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
#if XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, vget_low_u8(va_zero_point)));
#else // !XNN_ARCH_ARM64
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
#endif
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
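/*
 * Note (not part of the generated file): the ld128 variant consumes 16 source
 * bytes per iteration, so the zero-point subtraction needs both halves of the
 * vector. On AArch64 it keeps a 128-bit zero point and widens the upper half
 * with vsubl_high_u8; that intrinsic does not exist on 32-bit ARM, hence the
 * vget_high_u8 + vsubl_u8 fallback in the #else branch, which needs only a
 * 64-bit zero point.
 */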
| 5,119 | 42.02521 | 129 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-neon-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 4,501 | 41.074766 | 126 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-neon-ld64-x32.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x32(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x16_t voutput_min = vld1q_dup_u8(&params->neon.output_min);
  const uint8x16_t voutput_max = vld1q_dup_u8(&params->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const uint8x8_t va89ABCDEF = vld1_u8(input_a); input_a += 8;
const uint8x8_t vaGHIJKLMN = vld1_u8(input_a); input_a += 8;
const uint8x8_t vaOPQRSTUV = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
const int16x8_t vxa89ABCDEF = vreinterpretq_s16_u16(vsubl_u8(va89ABCDEF, va_zero_point));
const int16x8_t vxaGHIJKLMN = vreinterpretq_s16_u16(vsubl_u8(vaGHIJKLMN, va_zero_point));
const int16x8_t vxaOPQRSTUV = vreinterpretq_s16_u16(vsubl_u8(vaOPQRSTUV, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
int32x4_t vacc89AB = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccCDEF = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa89ABCDEF)), va_multiplier);
int32x4_t vaccGHIJ = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccKLMN = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaGHIJKLMN)), va_multiplier);
int32x4_t vaccOPQR = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxaOPQRSTUV)), va_multiplier);
int32x4_t vaccSTUV = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxaOPQRSTUV)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
vacc89AB = vrshlq_s32(vacc89AB, vright_shift);
vaccCDEF = vrshlq_s32(vaccCDEF, vright_shift);
vaccGHIJ = vrshlq_s32(vaccGHIJ, vright_shift);
vaccKLMN = vrshlq_s32(vaccKLMN, vright_shift);
vaccOPQR = vrshlq_s32(vaccOPQR, vright_shift);
vaccSTUV = vrshlq_s32(vaccSTUV, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
const int16x8_t vacc89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc89AB), vqmovn_s32(vaccCDEF)), voutput_zero_point);
const int16x8_t vaccGHIJKLMN = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccGHIJ), vqmovn_s32(vaccKLMN)), voutput_zero_point);
const int16x8_t vaccOPQRSTUV = vqaddq_s16(vcombine_s16(vqmovn_s32(vaccOPQR), vqmovn_s32(vaccSTUV)), voutput_zero_point);
uint8x16_t vout0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc01234567), vqmovun_s16(vacc89ABCDEF));
uint8x16_t voutGHIJKLMNOPQRSTUV = vcombine_u8(vqmovun_s16(vaccGHIJKLMN), vqmovun_s16(vaccOPQRSTUV));
vout0123456789ABCDEF = vmaxq_u8(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = vmaxq_u8(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = vminq_u8(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = vminq_u8(voutGHIJKLMNOPQRSTUV, voutput_max);
vst1q_u8(output, vout0123456789ABCDEF); output += 16;
vst1q_u8(output, voutGHIJKLMNOPQRSTUV); output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, vget_low_u8(voutput_min));
vout01234567 = vmin_u8(vout01234567, vget_low_u8(voutput_max));
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
vst1_u8(output, vout01234567); output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
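/*
 * Note (not part of the generated file): the ld64-x16 and ld64-x32 kernels are
 * pure unrolls of the same ld64 loop; the wider variants amortize per-iteration
 * overhead at the cost of register pressure, and all of them share the
 * identical 8-element tail code, so results are bit-for-bit the same across
 * unroll widths.
 */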
| 5,966 | 46.736 | 126 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-neon-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/neon.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__neon_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->neon.a_zero_point);
  const int32x4_t va_multiplier = vld1q_dup_s32(&params->neon.a_multiplier);
  const int32x4_t vright_shift = vld1q_dup_s32(&params->neon.right_shift);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
  const uint8x8_t voutput_min = vld1_dup_u8(&params->neon.output_min);
  const uint8x8_t voutput_max = vld1_dup_u8(&params->neon.output_max);
const int32_t vxb = (int32_t) *input_b - (int32_t) params->neon.b_zero_point;
const int32_t vb = params->neon.b_multiplier;
const int32x4_t vbias = vdupq_n_s32(vxb * vb);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t va01234567 = vld1_u8(input_a); input_a += 8;
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
vst1_u8(output, vout01234567); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const uint8x8_t va01234567 = vld1_u8(input_a);
const int16x8_t vxa01234567 = vreinterpretq_s16_u16(vsubl_u8(va01234567, va_zero_point));
int32x4_t vacc0123 = vmlaq_s32(vbias, vmovl_s16(vget_low_s16(vxa01234567)), va_multiplier);
int32x4_t vacc4567 = vmlaq_s32(vbias, vmovl_s16(vget_high_s16(vxa01234567)), va_multiplier);
vacc0123 = vrshlq_s32(vacc0123, vright_shift);
vacc4567 = vrshlq_s32(vacc4567, vright_shift);
const int16x8_t vacc01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0123), vqmovn_s32(vacc4567)), voutput_zero_point);
uint8x8_t vout01234567 = vqmovun_s16(vacc01234567);
vout01234567 = vmax_u8(vout01234567, voutput_min);
vout01234567 = vmin_u8(vout01234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vout01234567), 0); output += 4;
vout01234567 = vext_u8(vout01234567, vout01234567, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vout01234567), 0); output += 2;
vout01234567 = vext_u8(vout01234567, vout01234567, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vout01234567, 0);
}
}
}
}
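/*
 * Note (not part of the generated file): NEON requantizes with vrshlq_s32 and
 * a negative shift count, i.e. a rounding arithmetic right shift, whereas the
 * SSE variants use the truncating _mm_sra_epi32. The per-ISA parameter blocks
 * (params->neon.* vs params->sse*.*) are initialized separately, so each
 * kernel receives constants prepared for its own shift semantics.
 */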
| 3,551 | 36.787234 | 126 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__scalar_x1(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
do {
const int32_t va = *input_a++;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
batch -= sizeof(uint8_t);
} while (batch != 0);
}
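/*
 * Note (hypothetical usage sketch, not produced by XNNPACK's init helpers):
 * a direct invocation of the scalar kernel above. The field names mirror the
 * params->scalar.* accesses in the kernel body; the numeric values are
 * hand-picked so the op degenerates to an identity copy,
 * out = ((8 + 16*a) >> 4) + 0 = a, with the constant operand contributing
 * 16 * 0 = 0 to the bias. Assumes the XNNPACK headers declaring the params
 * union are on the include path.
 */
#include <stdio.h>

void demo_qu8_vaddc_scalar(void) {
  union xnn_qu8_add_minmax_params p;
  p.scalar.a_multiplier = 16;
  p.scalar.b_multiplier = 16;
  p.scalar.shift = 4;
  p.scalar.bias = 8;                          /* rounding term: (16*a + 8) >> 4 == a */
  p.scalar.output_zero_point = 0;
  p.scalar.output_min_less_zero_point = 0;    /* i.e. output_min == 0 */
  p.scalar.output_max_less_zero_point = 255;  /* i.e. output_max == 255 */
  const uint8_t a[4] = { 0, 1, 128, 255 };
  const uint8_t b = 0;                        /* the broadcast "constant" operand */
  uint8_t y[4];
  xnn_qu8_vaddc_minmax_ukernel__scalar_x1(4 * sizeof(uint8_t), a, &b, y, &p);
  for (int i = 0; i < 4; i++) {
    printf("%u -> %u\n", a[i], y[i]);         /* prints an identity mapping */
  }
}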
| 1,545 | 31.208333 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__scalar_x2(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
input_a += 2;
const int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vacc1 = vbias + va1 * va_multiplier;
input_b += 2;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
const int32_t va = *input_a;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
}
}
| 2,311 | 31.56338 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__scalar_x4(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias + (int32_t) *input_b * params->scalar.b_multiplier;
const int32_t va_multiplier = params->scalar.a_multiplier;
const uint32_t vshift = params->scalar.shift;
const int32_t voutput_min_less_zero_point = params->scalar.output_min_less_zero_point;
const int32_t voutput_max_less_zero_point = params->scalar.output_max_less_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const int32_t va0 = input_a[0];
const int32_t va1 = input_a[1];
const int32_t va2 = input_a[2];
const int32_t va3 = input_a[3];
input_a += 4;
const int32_t vacc0 = vbias + va0 * va_multiplier;
const int32_t vacc1 = vbias + va1 * va_multiplier;
const int32_t vacc2 = vbias + va2 * va_multiplier;
const int32_t vacc3 = vbias + va3 * va_multiplier;
input_b += 4;
int32_t vout0 = math_asr_s32(vacc0, vshift);
int32_t vout1 = math_asr_s32(vacc1, vshift);
int32_t vout2 = math_asr_s32(vacc2, vshift);
int32_t vout3 = math_asr_s32(vacc3, vshift);
vout0 = math_max_s32(vout0, voutput_min_less_zero_point);
vout1 = math_max_s32(vout1, voutput_min_less_zero_point);
vout2 = math_max_s32(vout2, voutput_min_less_zero_point);
vout3 = math_max_s32(vout3, voutput_min_less_zero_point);
vout0 = math_min_s32(vout0, voutput_max_less_zero_point);
vout1 = math_min_s32(vout1, voutput_max_less_zero_point);
vout2 = math_min_s32(vout2, voutput_max_less_zero_point);
vout3 = math_min_s32(vout3, voutput_max_less_zero_point);
vout0 += voutput_zero_point;
vout1 += voutput_zero_point;
vout2 += voutput_zero_point;
vout3 += voutput_zero_point;
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t va = *input_a++;
const int32_t vacc = vbias + va * va_multiplier;
int32_t vout = math_asr_s32(vacc, vshift);
vout = math_max_s32(vout, voutput_min_less_zero_point);
vout = math_min_s32(vout, voutput_max_less_zero_point);
*output++ = (uint8_t) (vout + voutput_zero_point);
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
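/*
 * Note (not part of the generated file): in every vaddc ("add a broadcast
 * constant") kernel the second operand is dereferenced exactly once, when the
 * bias is computed from *input_b before the main loop. The input_b += N
 * increments seen inside some loop bodies are vestigial template code; the
 * pointer is never read again, so they have no effect on the output.
 */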
| 3,053 | 33.314607 | 95 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse2-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
__m128i va89ABCDEF = _mm_loadl_epi64((const __m128i*) (input_a + 8));
input_a += 16;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
va89ABCDEF = _mm_unpacklo_epi8(va89ABCDEF, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
va01234567 = _mm_unpacklo_epi8(va01234567, _mm_setzero_si128());
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
batch = 0;
}
} while (batch != 0);
}
}
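/*
 * Note (illustrative sketch, not part of the generated kernels): SSE2 has no
 * 32-bit vector multiply (_mm_mullo_epi32 is SSE4.1), so the mul16 kernels
 * assemble the 32-bit product a * m from 16-bit halves: mullo/mulhi against
 * the low half of the multiplier plus a mullo against the high half,
 * interleaved back together by the unpacklo/unpackhi pairs above. A scalar
 * model of that identity, with illustrative names:
 */
#include <stdint.h>

static inline uint32_t mul16_trick_ref(uint16_t a, uint32_t m) {
  const uint16_t m_lo = (uint16_t) m;
  const uint16_t m_hi = (uint16_t) (m >> 16);
  const uint16_t prod_lo = (uint16_t) ((uint32_t) a * m_lo);            /* _mm_mullo_epi16 */
  const uint16_t carry   = (uint16_t) (((uint32_t) a * m_lo) >> 16);    /* _mm_mulhi_epu16 */
  const uint16_t prod_hi = (uint16_t) (carry + (uint32_t) a * m_hi);    /* _mm_add_epi16 of _mm_mullo_epi16 */
  return ((uint32_t) prod_hi << 16) | prod_lo;                          /* _mm_unpack{lo,hi}_epi16 */
}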
| 5,576 | 41.9 | 114 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse2-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse2_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
input_a += 8;
const __m128i vzero = _mm_setzero_si128();
va01234567 = _mm_unpacklo_epi8(va01234567, vzero);
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
__m128i va01234567 = _mm_loadl_epi64((const __m128i*) input_a);
va01234567 = _mm_unpacklo_epi8(va01234567, _mm_setzero_si128());
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_cvtsi128_si32(vout0123456701234567);
}
}
}
}
| 4,488 | 39.080357 | 114 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse41-mul16-ld64-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
const __m128i va89ABCDEF = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input_a + 8)));
input_a += 16;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
__m128i vaprod89ABCDEFhi = _mm_mulhi_epu16(va89ABCDEF, va_multiplier_lo);
const __m128i vaprod89ABCDEFlo = _mm_mullo_epi16(va89ABCDEF, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
vaprod89ABCDEFhi = _mm_add_epi16(vaprod89ABCDEFhi, _mm_mullo_epi16(va89ABCDEF, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod89ABCDEFlo, vaprod89ABCDEFhi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
| 5,429 | 42.095238 | 114 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse41-mul16-ld64-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul16-ld64.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse41_mul16_ld64_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i vbias = _mm_add_epi32(
_mm_shuffle_epi32(_mm_cvtsi32_si128(params->sse2.b_multiplier * (int32_t) *input_b), _MM_SHUFFLE(0, 0, 0, 0)),
_mm_load_si128((const __m128i*) params->sse2.bias));
const __m128i va_multiplier_lo = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_lo);
const __m128i va_multiplier_hi = _mm_load_si128((const __m128i*) params->sse2.a_multiplier_hi);
const __m128i vshift = _mm_cvtsi32_si128((int) params->sse2.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
input_a += 8;
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va01234567 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input_a));
__m128i vaprod01234567hi = _mm_mulhi_epu16(va01234567, va_multiplier_lo);
const __m128i vaprod01234567lo = _mm_mullo_epi16(va01234567, va_multiplier_lo);
vaprod01234567hi = _mm_add_epi16(vaprod01234567hi, _mm_mullo_epi16(va01234567, va_multiplier_hi));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vaprod01234567lo, vaprod01234567hi));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vaprod01234567lo, vaprod01234567hi));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
__m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 4,371 | 39.110092 | 114 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse41-mul32-ld32-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
__m128i vacc89AB = _mm_add_epi32(vbias, _mm_mullo_epi32(va89AB, va_multiplier));
__m128i vaccCDEF = _mm_add_epi32(vbias, _mm_mullo_epi32(vaCDEF, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
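/*
 * Note (not part of the generated file): the mul32-ld32 variants lean on two
 * SSE4.1 instructions that SSE2 lacks: _mm_cvtepu8_epi32 (zero-extend four
 * bytes straight to 32-bit lanes, fed by the 4-byte unaligned loads that give
 * the "ld32" suffix) and _mm_mullo_epi32 (the 32-bit multiply that the mul16
 * kernels above emulate with paired 16-bit multiplies).
 */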
| 4,861 | 42.410714 | 107 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-sse41-mul32-ld32-x8.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__sse41_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_add_epi32(vbias, _mm_mullo_epi32(va0123, va_multiplier));
__m128i vacc4567 = _mm_add_epi32(vbias, _mm_mullo_epi32(va4567, va_multiplier));
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
| 3,983 | 40.072165 | 107 | c |
| XNNPACK | XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-wasmsimd-x16.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
input_a += 16;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
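// Illustration (not part of the XNNPACK sources): per element, the vaddc kernels
// above compute
//   y = clamp(((bias + a * a_multiplier) >> shift) + output_zero_point,
//             output_min, output_max)
// where bias already folds in input_b * b_multiplier. A minimal scalar model,
// ignoring the intermediate 16-bit saturation that the SIMD path applies while
// narrowing (it only differs for out-of-range accumulators):
#include <stdint.h>

static inline uint8_t qu8_vaddc_model(uint8_t a, int32_t bias, int32_t a_multiplier,
                                      uint32_t shift, int32_t output_zero_point,
                                      uint8_t output_min, uint8_t output_max) {
  int32_t acc = bias + (int32_t) a * a_multiplier;   // widen, scale, add folded bias
  acc = acc >> shift;            // arithmetic rescale (assumes asr for negative acc)
  acc += output_zero_point;      // shift into the output quantization domain
  if (acc < (int32_t) output_min) acc = output_min;  // final clamp
  if (acc > (int32_t) output_max) acc = output_max;
  return (uint8_t) acc;
}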
// File: XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-wasmsimd-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x32(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
const v128_t va89ABCDEF = wasm_u16x8_load8x8(input_a + 8);
const v128_t vaGHIJKLMN = wasm_u16x8_load8x8(input_a + 16);
const v128_t vaOPQRSTUV = wasm_u16x8_load8x8(input_a + 24);
input_a += 32;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
v128_t vacc89AB = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccCDEF = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va89ABCDEF), va_multiplier));
v128_t vaccGHIJ = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccKLMN = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vaGHIJKLMN), va_multiplier));
v128_t vaccOPQR = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(vaOPQRSTUV), va_multiplier));
v128_t vaccSTUV = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(vaOPQRSTUV), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
vacc89AB = wasm_i32x4_shr(vacc89AB, vshift);
vaccCDEF = wasm_i32x4_shr(vaccCDEF, vshift);
vaccGHIJ = wasm_i32x4_shr(vaccGHIJ, vshift);
vaccKLMN = wasm_i32x4_shr(vaccKLMN, vshift);
vaccOPQR = wasm_i32x4_shr(vaccOPQR, vshift);
vaccSTUV = wasm_i32x4_shr(vaccSTUV, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout89ABCDEF = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc89AB, vaccCDEF), voutput_zero_point);
v128_t voutGHIJKLMN = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccGHIJ, vaccKLMN), voutput_zero_point);
v128_t voutOPQRSTUV = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vaccOPQR, vaccSTUV), voutput_zero_point);
v128_t vout0123456789ABCDEF = wasm_u8x16_narrow_i16x8(vout01234567, vout89ABCDEF);
v128_t voutGHIJKLMNOPQRSTUV = wasm_u8x16_narrow_i16x8(voutGHIJKLMN, voutOPQRSTUV);
vout0123456789ABCDEF = wasm_u8x16_max(vout0123456789ABCDEF, voutput_min);
voutGHIJKLMNOPQRSTUV = wasm_u8x16_max(voutGHIJKLMNOPQRSTUV, voutput_min);
vout0123456789ABCDEF = wasm_u8x16_min(vout0123456789ABCDEF, voutput_max);
voutGHIJKLMNOPQRSTUV = wasm_u8x16_min(voutGHIJKLMNOPQRSTUV, voutput_max);
wasm_v128_store(output, vout0123456789ABCDEF);
wasm_v128_store(output + 16, voutGHIJKLMNOPQRSTUV);
output += 32;
}
if XNN_UNLIKELY(batch != 0) {
do {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
// File: XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-wasmsimd-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__wasmsimd_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const v128_t va_multiplier = wasm_v128_load64_splat(params->wasmsimd.a_multiplier);
const uint32_t vshift = params->wasmsimd.shift;
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
const v128_t voutput_min = wasm_v128_load64_splat(params->wasmsimd.output_min);
const v128_t voutput_max = wasm_v128_load64_splat(params->wasmsimd.output_max);
v128_t vbias = wasm_i32x4_splat((int32_t) *input_b * params->wasmsimd.b_multiplier[0]);
vbias = wasm_i32x4_add(vbias, wasm_v128_load64_splat(params->wasmsimd.bias));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
input_a += 8;
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
wasm_v128_store64_lane(output, vout0123456701234567, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const v128_t va01234567 = wasm_u16x8_load8x8(input_a);
v128_t vacc0123 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_low_u16x8(va01234567), va_multiplier));
v128_t vacc4567 = wasm_i32x4_add(vbias, wasm_i32x4_mul(wasm_u32x4_extend_high_u16x8(va01234567), va_multiplier));
vacc0123 = wasm_i32x4_shr(vacc0123, vshift);
vacc4567 = wasm_i32x4_shr(vacc4567, vshift);
v128_t vout01234567 = wasm_i16x8_add_sat(wasm_i16x8_narrow_i32x4(vacc0123, vacc4567), voutput_zero_point);
v128_t vout0123456701234567 = wasm_u8x16_narrow_i16x8(vout01234567, vout01234567);
vout0123456701234567 = wasm_u8x16_max(vout0123456701234567, voutput_min);
vout0123456701234567 = wasm_u8x16_min(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u64x2_shr(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vout0123456701234567, 0);
vout0123456701234567 = wasm_u32x4_shr(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vout0123456701234567, 0);
}
}
}
}
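// Remainder shapes across the wasmsimd tiles: the x16 and x32 kernels above loop
// in the tail (up to 15 or 31 elements may remain), while this x8 kernel needs
// only a single partial-store block because fewer than 8 elements remain.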
// File: XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-xop-mul32-ld32-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x16(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
const __m128i va89AB = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 8)));
const __m128i vaCDEF = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 12)));
input_a += 16;
input_b += 16;
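    // XOP's _mm_macc_epi32 fuses the 32-bit multiply and bias add that the plain
    // SSE4.1/AVX kernels express as _mm_add_epi32(vbias, _mm_mullo_epi32(...)).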
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
__m128i vacc89AB = _mm_macc_epi32(va89AB, va_multiplier, vbias);
__m128i vaccCDEF = _mm_macc_epi32(vaCDEF, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
vacc89AB = _mm_sra_epi32(vacc89AB, vshift);
vaccCDEF = _mm_sra_epi32(vaccCDEF, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
const __m128i vout89ABCDEF = _mm_adds_epi16(_mm_packs_epi32(vacc89AB, vaccCDEF), voutput_zero_point);
__m128i vout0123456789ABCDEF = _mm_packus_epi16(vout01234567, vout89ABCDEF);
vout0123456789ABCDEF = _mm_max_epu8(vout0123456789ABCDEF, voutput_min);
vout0123456789ABCDEF = _mm_min_epu8(vout0123456789ABCDEF, voutput_max);
_mm_storeu_si128((__m128i*) output, vout0123456789ABCDEF);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
do {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if XNN_LIKELY(batch >= (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
batch -= 8 * sizeof(uint8_t);
} else {
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
batch = 0;
}
} while (batch != 0);
}
}
// File: XNNPACK-master/src/qu8-vaddc/gen/qu8-vaddc-minmax-xop-mul32-ld32-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vaddc/sse-mul32-ld32.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vadd.h>
void xnn_qu8_vaddc_minmax_ukernel__xop_mul32_ld32_x8(
size_t batch,
const uint8_t* input_a,
const uint8_t* input_b,
uint8_t* output,
const union xnn_qu8_add_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input_a != NULL);
assert(input_b != NULL);
assert(output != NULL);
const __m128i va_multiplier = _mm_load_si128((const __m128i*) params->sse4.a_multiplier);
const __m128i vshift = _mm_load_si128((const __m128i*) params->sse4.shift);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse4.output_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse4.output_min);
const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse4.output_max);
__m128i vbias = _mm_cvtsi32_si128(params->sse4.b_multiplier[0] * (int32_t) *input_b);
vbias = _mm_shuffle_epi32(vbias, _MM_SHUFFLE(0, 0, 0, 0));
vbias = _mm_add_epi32(vbias, _mm_load_si128((const __m128i*) params->sse4.bias));
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
input_a += 8;
input_b += 8;
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
_mm_storel_epi64((__m128i*) output, vout0123456701234567);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
{
const __m128i va0123 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a)));
const __m128i va4567 = _mm_cvtepu8_epi32(_mm_cvtsi32_si128((int) unaligned_load_s32(input_a + 4)));
__m128i vacc0123 = _mm_macc_epi32(va0123, va_multiplier, vbias);
__m128i vacc4567 = _mm_macc_epi32(va4567, va_multiplier, vbias);
vacc0123 = _mm_sra_epi32(vacc0123, vshift);
vacc4567 = _mm_sra_epi32(vacc4567, vshift);
const __m128i vout01234567 = _mm_adds_epi16(_mm_packs_epi32(vacc0123, vacc4567), voutput_zero_point);
__m128i vout0123456701234567 = _mm_packus_epi16(vout01234567, vout01234567);
vout0123456701234567 = _mm_max_epu8(vout0123456701234567, voutput_min);
vout0123456701234567 = _mm_min_epu8(vout0123456701234567, voutput_max);
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vout0123456701234567));
vout0123456701234567 = _mm_srli_epi64(vout0123456701234567, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) _mm_extract_epi16(vout0123456701234567, 0));
vout0123456701234567 = _mm_srli_epi32(vout0123456701234567, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vout0123456701234567, 0);
}
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-armsimd32-x4.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__armsimd32_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x2_t vminus_input_zero_point = (uint16x2_t) params->armsimd32.minus_input_zero_point;
const int32_t vbias = params->armsimd32.bias;
const int32_t vmultiplier = params->armsimd32.multiplier;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
input += 4;
const uint16x2_t vx02 = __uxtab16(vminus_input_zero_point, vx0123);
const uint16x2_t vx13 = __uxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 1), 8);
vacc1 = __usat(math_asr_s32(vacc1, 1), 8);
vacc2 = __usat(math_asr_s32(vacc2, 1), 8);
vacc3 = __usat(math_asr_s32(vacc3, 1), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
const uint16x2_t vx02 = __uxtab16(vminus_input_zero_point, vx0123);
const uint16x2_t vx13 = __uxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
const int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 1), 8);
vacc1 = __usat(math_asr_s32(vacc1, 1), 8);
if (batch & (2 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
vacc0 = __usat(math_asr_s32(vacc2, 1), 8);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
}
}
}
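// Plain-C model of the ACLE sequence above (illustration, not XNNPACK code):
// __uxtab16 adds each zero-extended byte of x to the packed halfwords of
// minus_input_zero_point, __smlawb/__smlawt compute bias + ((multiplier * lane) >> 16)
// on the bottom/top halfword, and __usat clamps to [0, 255] after one more
// arithmetic shift.
#include <stdint.h>

static inline uint8_t qu8_vcvt_armsimd32_model(uint8_t x, int16_t minus_input_zero_point,
                                               int32_t multiplier, int32_t bias) {
  const int16_t centered = (int16_t) (x + minus_input_zero_point);           // __uxtab16 lane
  int32_t acc = bias + (int32_t) (((int64_t) multiplier * centered) >> 16);  // __smlawb
  acc >>= 1;                                                                 // math_asr_s32(acc, 1)
  if (acc < 0) acc = 0;                                                      // __usat(acc, 8)
  if (acc > 255) acc = 255;
  return (uint8_t) acc;
}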
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-armsimd32-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__armsimd32_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x2_t vminus_input_zero_point = (uint16x2_t) params->armsimd32.minus_input_zero_point;
const int32_t vbias = params->armsimd32.bias;
const int32_t vmultiplier = params->armsimd32.multiplier;
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_indexed_load_u32(input, 0);
const uint8x4_t vx4567 = (uint8x4_t) unaligned_indexed_load_u32(input, 1);
input += 8;
const uint16x2_t vx02 = __uxtab16(vminus_input_zero_point, vx0123);
const uint16x2_t vx13 = __uxtab16(vminus_input_zero_point, __ror(vx0123, 8));
const uint16x2_t vx46 = __uxtab16(vminus_input_zero_point, vx4567);
const uint16x2_t vx57 = __uxtab16(vminus_input_zero_point, __ror(vx4567, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
int32_t vacc4 = __smlawb(vmultiplier, vx46, vbias);
int32_t vacc5 = __smlawb(vmultiplier, vx57, vbias);
int32_t vacc6 = __smlawt(vmultiplier, vx46, vbias);
int32_t vacc7 = __smlawt(vmultiplier, vx57, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 1), 8);
vacc1 = __usat(math_asr_s32(vacc1, 1), 8);
vacc2 = __usat(math_asr_s32(vacc2, 1), 8);
vacc3 = __usat(math_asr_s32(vacc3, 1), 8);
vacc4 = __usat(math_asr_s32(vacc4, 1), 8);
vacc5 = __usat(math_asr_s32(vacc5, 1), 8);
vacc6 = __usat(math_asr_s32(vacc6, 1), 8);
vacc7 = __usat(math_asr_s32(vacc7, 1), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output[4] = (uint8_t) vacc4;
output[5] = (uint8_t) vacc5;
output[6] = (uint8_t) vacc6;
output[7] = (uint8_t) vacc7;
output += 8;
}
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
input += 4;
const uint16x2_t vx02 = __uxtab16(vminus_input_zero_point, vx0123);
const uint16x2_t vx13 = __uxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
int32_t vacc3 = __smlawt(vmultiplier, vx13, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 1), 8);
vacc1 = __usat(math_asr_s32(vacc1, 1), 8);
vacc2 = __usat(math_asr_s32(vacc2, 1), 8);
vacc3 = __usat(math_asr_s32(vacc3, 1), 8);
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
output[2] = (uint8_t) vacc2;
output[3] = (uint8_t) vacc3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
const uint8x4_t vx0123 = (uint8x4_t) unaligned_load_u32(input);
const uint16x2_t vx02 = __uxtab16(vminus_input_zero_point, vx0123);
const uint16x2_t vx13 = __uxtab16(vminus_input_zero_point, __ror(vx0123, 8));
int32_t vacc0 = __smlawb(vmultiplier, vx02, vbias);
int32_t vacc1 = __smlawb(vmultiplier, vx13, vbias);
const int32_t vacc2 = __smlawt(vmultiplier, vx02, vbias);
vacc0 = __usat(math_asr_s32(vacc0, 1), 8);
vacc1 = __usat(math_asr_s32(vacc1, 1), 8);
if (batch & (2 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
output[1] = (uint8_t) vacc1;
vacc0 = __usat(math_asr_s32(vacc2, 1), 8);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
output[0] = (uint8_t) vacc0;
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
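// Fixed-point note for the mulhrs-based vcvt kernels: _mm_mulhrs_epi16(a, b)
// computes (a * b + 0x4000) >> 15, so pre-shifting the centered value
// (input_zero_point - x) left by 7 makes the pair evaluate
// round((input_zero_point - x) * multiplier / 256) before the saturating add of
// the output zero point.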
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx2-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx2_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx2-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx2_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m256i vacc0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
input += 32;
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
__m256i vy0 = _mm256_packus_epi16(vacc0, vacc1);
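    // _mm256_packus_epi16 packs within each 128-bit lane, so a cross-lane permute
    // with _MM_SHUFFLE(3, 1, 2, 0) is needed to restore the original element order.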
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-avx2-x64.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/avx2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__avx2_x64(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m256i vinput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.input_zero_point);
const __m256i vmultiplier = _mm256_load_si256((const __m256i*) params->avx2.multiplier);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->avx2.output_zero_point);
for (; batch >= 64 * sizeof(uint8_t); batch -= 64 * sizeof(uint8_t)) {
__m256i vacc0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
__m256i vacc1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 16)));
__m256i vacc2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 32)));
__m256i vacc3 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) (input + 48)));
input += 64;
vacc0 = _mm256_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm256_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm256_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm256_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm256_slli_epi16(vacc0, 7);
vacc1 = _mm256_slli_epi16(vacc1, 7);
vacc2 = _mm256_slli_epi16(vacc2, 7);
vacc3 = _mm256_slli_epi16(vacc3, 7);
vacc0 = _mm256_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm256_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm256_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm256_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm256_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm256_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm256_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm256_adds_epi16(vacc3, voutput_zero_point);
__m256i vy0 = _mm256_packus_epi16(vacc0, vacc1);
__m256i vy1 = _mm256_packus_epi16(vacc2, vacc3);
vy0 = _mm256_permute4x64_epi64(vy0, _MM_SHUFFLE(3, 1, 2, 0));
vy1 = _mm256_permute4x64_epi64(vy1, _MM_SHUFFLE(3, 1, 2, 0));
_mm256_storeu_si256((__m256i*) output, vy0);
_mm256_storeu_si256((__m256i*) (output + 32), vy1);
output += 64;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
input += 16;
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
const __m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
__m256i vacc = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*) input));
vacc = _mm256_sub_epi16(vinput_zero_point, vacc);
vacc = _mm256_slli_epi16(vacc, 7);
vacc = _mm256_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm256_adds_epi16(vacc, voutput_zero_point);
const __m128i vacc_hi = _mm256_extracti128_si256(vacc, 1);
__m128i vy = _mm_packus_epi16(_mm256_castsi256_si128(vacc), vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-neon-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__neon_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(¶ms->neon.input_zero_point);
const int16x8_t vmultiplier = vld1q_dup_s16(¶ms->neon.multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
vst1q_u8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
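// NEON note: vqrdmulhq_s16(a, b) computes sat16((2*a*b + 0x8000) >> 16), which
// matches the rounding of the x86 _mm_mulhrs_epi16 kernels above (NEON additionally
// saturates the one overflowing product), so these kernels evaluate the same
// round(((input_zero_point - x) * multiplier) / 256) fixed-point result.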
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-neon-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__neon_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(¶ms->neon.input_zero_point);
const int16x8_t vmultiplier = vld1q_dup_s16(¶ms->neon.multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
const uint8x16_t vx1 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
int16x8_t vacc2 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx1)));
int16x8_t vacc3 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx1)));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
vacc0 = vqrdmulhq_s16(vacc0, vmultiplier);
vacc1 = vqrdmulhq_s16(vacc1, vmultiplier);
vacc2 = vqrdmulhq_s16(vacc2, vmultiplier);
vacc3 = vqrdmulhq_s16(vacc3, vmultiplier);
vacc0 = vqaddq_s16(vacc0, voutput_zero_point);
vacc1 = vqaddq_s16(vacc1, voutput_zero_point);
vacc2 = vqaddq_s16(vacc2, voutput_zero_point);
vacc3 = vqaddq_s16(vacc3, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vacc0), vqmovun_s16(vacc1));
const uint8x16_t vy1 = vcombine_u8(vqmovun_s16(vacc2), vqmovun_s16(vacc3));
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-neon-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/neon.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__neon_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint16x8_t vinput_zero_point = vld1q_dup_u16(¶ms->neon.input_zero_point);
const int16x8_t vmultiplier = vld1q_dup_s16(¶ms->neon.multiplier);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->neon.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vacc);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
vacc = vqrdmulhq_s16(vacc, vmultiplier);
vacc = vqaddq_s16(vacc, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vacc);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-scalar-x1.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__scalar_x1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
do {
int32_t vacc = *input++;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
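// Standalone model of the scalar conversion above (illustration only): the kernel
// evaluates y = clamp((bias + x * multiplier) >> 8, 0, 255) per byte. math_asr_s32
// guarantees an arithmetic shift; plain C >> on a negative int is assumed to behave
// the same here, as it does on mainstream compilers.
#include <stdint.h>

static inline uint8_t qu8_vcvt_scalar_model(uint8_t x, int32_t bias, int32_t multiplier) {
  int32_t acc = bias + (int32_t) x * multiplier;  // widen and rescale
  int32_t out = acc >> 8;                         // math_asr_s32(acc, 8)
  if (out < 0) out = 0;                           // math_max_s32(out, 0)
  if (out > 255) out = 255;                       // math_min_s32(out, 255)
  return (uint8_t) out;
}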
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-scalar-x2.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__scalar_x2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
int32_t vacc0 = input[0];
int32_t vacc1 = input[1];
input += 2;
vacc0 = vbias + vacc0 * vmultiplier;
vacc1 = vbias + vacc1 * vmultiplier;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
if XNN_UNLIKELY(batch != 0) {
int32_t vacc = *input;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output = (uint8_t) vout;
}
}
// File: XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-scalar-x4.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__scalar_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const int32_t vbias = params->scalar.bias;
const int32_t vmultiplier = params->scalar.multiplier;
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
int32_t vacc0 = input[0];
int32_t vacc1 = input[1];
int32_t vacc2 = input[2];
int32_t vacc3 = input[3];
input += 4;
vacc0 = vbias + vacc0 * vmultiplier;
vacc1 = vbias + vacc1 * vmultiplier;
vacc2 = vbias + vacc2 * vmultiplier;
vacc3 = vbias + vacc3 * vmultiplier;
int32_t vout0 = math_asr_s32(vacc0, 8);
int32_t vout1 = math_asr_s32(vacc1, 8);
int32_t vout2 = math_asr_s32(vacc2, 8);
int32_t vout3 = math_asr_s32(vacc3, 8);
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout2 = math_max_s32(vout2, 0);
vout3 = math_max_s32(vout3, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
vout2 = math_min_s32(vout2, 255);
vout3 = math_min_s32(vout3, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
if XNN_UNLIKELY(batch != 0) {
do {
int32_t vacc = *input++;
vacc = vbias + vacc * vmultiplier;
int32_t vout = math_asr_s32(vacc, 8);
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-sse2-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_vcvt_ukernel__sse2_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
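  // SSE2 has no 16x16 -> 32-bit widening multiply, so each 32-bit product is assembled
  // from its low (_mm_mullo_epi16) and high (_mm_mulhi_epu16) halves interleaved with
  // _mm_unpacklo/hi_epi16, then biased, arithmetically shifted right by 8, and re-packed
  // with signed followed by unsigned saturation.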
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);
const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
const __m128i vprodhi0 = _mm_mulhi_epu16(vextx0, vmultiplier);
const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
const __m128i vprodhi1 = _mm_mulhi_epu16(vextx1, vmultiplier);
__m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
__m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
__m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
vacc0 = _mm_add_epi32(vacc0, vbias);
vacc1 = _mm_add_epi32(vacc1, vbias);
vacc2 = _mm_add_epi32(vacc2, vbias);
vacc3 = _mm_add_epi32(vacc3, vbias);
vacc0 = _mm_srai_epi32(vacc0, 8);
vacc1 = _mm_srai_epi32(vacc1, 8);
vacc2 = _mm_srai_epi32(vacc2, 8);
vacc3 = _mm_srai_epi32(vacc3, 8);
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc1 = _mm_packs_epi32(vacc2, vacc3);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_add_epi32(vacc_ll, vbias);
vacc_lh = _mm_add_epi32(vacc_lh, vbias);
vacc_hl = _mm_add_epi32(vacc_hl, vbias);
vacc_hh = _mm_add_epi32(vacc_hh, vbias);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_add_epi32(vacc_ll, vbias);
vacc_lh = _mm_add_epi32(vacc_lh, vbias);
vacc_hl = _mm_add_epi32(vacc_hl, vbias);
vacc_hh = _mm_add_epi32(vacc_hh, vbias);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-sse2-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse2.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_vcvt_ukernel__sse2_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
const __m128i vextx0 = _mm_unpacklo_epi8(vx0, vzero);
const __m128i vextx1 = _mm_unpackhi_epi8(vx0, vzero);
const __m128i vextx2 = _mm_unpacklo_epi8(vx1, vzero);
const __m128i vextx3 = _mm_unpackhi_epi8(vx1, vzero);
const __m128i vprodlo0 = _mm_mullo_epi16(vextx0, vmultiplier);
const __m128i vprodhi0 = _mm_mulhi_epu16(vextx0, vmultiplier);
const __m128i vprodlo1 = _mm_mullo_epi16(vextx1, vmultiplier);
const __m128i vprodhi1 = _mm_mulhi_epu16(vextx1, vmultiplier);
const __m128i vprodlo2 = _mm_mullo_epi16(vextx2, vmultiplier);
const __m128i vprodhi2 = _mm_mulhi_epu16(vextx2, vmultiplier);
const __m128i vprodlo3 = _mm_mullo_epi16(vextx3, vmultiplier);
const __m128i vprodhi3 = _mm_mulhi_epu16(vextx3, vmultiplier);
__m128i vacc0 = _mm_unpacklo_epi16(vprodlo0, vprodhi0);
__m128i vacc1 = _mm_unpackhi_epi16(vprodlo0, vprodhi0);
__m128i vacc2 = _mm_unpacklo_epi16(vprodlo1, vprodhi1);
__m128i vacc3 = _mm_unpackhi_epi16(vprodlo1, vprodhi1);
__m128i vacc4 = _mm_unpacklo_epi16(vprodlo2, vprodhi2);
__m128i vacc5 = _mm_unpackhi_epi16(vprodlo2, vprodhi2);
__m128i vacc6 = _mm_unpacklo_epi16(vprodlo3, vprodhi3);
__m128i vacc7 = _mm_unpackhi_epi16(vprodlo3, vprodhi3);
vacc0 = _mm_add_epi32(vacc0, vbias);
vacc1 = _mm_add_epi32(vacc1, vbias);
vacc2 = _mm_add_epi32(vacc2, vbias);
vacc3 = _mm_add_epi32(vacc3, vbias);
vacc4 = _mm_add_epi32(vacc4, vbias);
vacc5 = _mm_add_epi32(vacc5, vbias);
vacc6 = _mm_add_epi32(vacc6, vbias);
vacc7 = _mm_add_epi32(vacc7, vbias);
vacc0 = _mm_srai_epi32(vacc0, 8);
vacc1 = _mm_srai_epi32(vacc1, 8);
vacc2 = _mm_srai_epi32(vacc2, 8);
vacc3 = _mm_srai_epi32(vacc3, 8);
vacc4 = _mm_srai_epi32(vacc4, 8);
vacc5 = _mm_srai_epi32(vacc5, 8);
vacc6 = _mm_srai_epi32(vacc6, 8);
vacc7 = _mm_srai_epi32(vacc7, 8);
vacc0 = _mm_packs_epi32(vacc0, vacc1);
vacc1 = _mm_packs_epi32(vacc2, vacc3);
vacc2 = _mm_packs_epi32(vacc4, vacc5);
vacc3 = _mm_packs_epi32(vacc6, vacc7);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_add_epi32(vacc_ll, vbias);
vacc_lh = _mm_add_epi32(vacc_lh, vbias);
vacc_hl = _mm_add_epi32(vacc_hl, vbias);
vacc_hh = _mm_add_epi32(vacc_hh, vbias);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
const __m128i vextx_lo = _mm_unpacklo_epi8(vx, vzero);
const __m128i vextx_hi = _mm_unpackhi_epi8(vx, vzero);
const __m128i vprodlo_lo = _mm_mullo_epi16(vextx_lo, vmultiplier);
const __m128i vprodlo_hi = _mm_mullo_epi16(vextx_hi, vmultiplier);
const __m128i vprodhi_lo = _mm_mulhi_epu16(vextx_lo, vmultiplier);
const __m128i vprodhi_hi = _mm_mulhi_epu16(vextx_hi, vmultiplier);
__m128i vacc_ll = _mm_unpacklo_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_lh = _mm_unpackhi_epi16(vprodlo_lo, vprodhi_lo);
__m128i vacc_hl = _mm_unpacklo_epi16(vprodlo_hi, vprodhi_hi);
__m128i vacc_hh = _mm_unpackhi_epi16(vprodlo_hi, vprodhi_hi);
vacc_ll = _mm_add_epi32(vacc_ll, vbias);
vacc_lh = _mm_add_epi32(vacc_lh, vbias);
vacc_hl = _mm_add_epi32(vacc_hl, vbias);
vacc_hh = _mm_add_epi32(vacc_hh, vbias);
vacc_ll = _mm_srai_epi32(vacc_ll, 8);
vacc_lh = _mm_srai_epi32(vacc_lh, 8);
vacc_hl = _mm_srai_epi32(vacc_hl, 8);
vacc_hh = _mm_srai_epi32(vacc_hh, 8);
const __m128i vacc_lo = _mm_packs_epi32(vacc_ll, vacc_lh);
const __m128i vacc_hi = _mm_packs_epi32(vacc_hl, vacc_hh);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-sse41-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__sse41_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
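  // Fixed-point pipeline: center the input as (input_zero_point - x), scale it into Q15
  // with a left shift of 7, apply a rounding Q15 multiply (_mm_mulhrs_epi16) by a
  // precomputed multiplier whose sign compensates for the reversed subtraction, then add
  // the output zero point with signed saturation and pack with unsigned saturation.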
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
input += 16;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-sse41-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__sse41_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
__m128i vacc0 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
__m128i vacc1 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 8)));
__m128i vacc2 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 16)));
__m128i vacc3 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) (input + 24)));
input += 32;
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-sse41-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/sse4.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__sse41_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
input += 8;
const __m128i vy = _mm_packus_epi16(vacc, vacc);
_mm_storel_epi64((__m128i*) output, vy);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
__m128i vacc = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i*) input));
vacc = _mm_sub_epi16(vinput_zero_point, vacc);
vacc = _mm_slli_epi16(vacc, 7);
vacc = _mm_mulhrs_epi16(vacc, vmultiplier);
vacc = _mm_adds_epi16(vacc, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
_mm_storeu_si32(output, vy);
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
_mm_storeu_si16(output, vy);
vy = _mm_srli_epi32(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) _mm_extract_epi8(vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-ssse3-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_vcvt_ukernel__ssse3_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
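  // Without SSE4.1's _mm_cvtepu8_epi16, the uint8 inputs are widened to int16 by
  // interleaving with a zero register (_mm_unpacklo/hi_epi8); the Q15 arithmetic
  // that follows matches the SSE4.1 kernels.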
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vzero);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vzero);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
_mm_storeu_si128((__m128i*) output, vy0);
output += 16;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-ssse3-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/ssse3.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <tmmintrin.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_vcvt_ukernel__ssse3_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const __m128i vinput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.input_zero_point);
const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->ssse3.multiplier);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->ssse3.output_zero_point);
const __m128i vzero = _mm_setzero_si128();
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const __m128i vx0 = _mm_loadu_si128((const __m128i*) input);
const __m128i vx1 = _mm_loadu_si128((const __m128i*) (input + 16));
input += 32;
__m128i vacc0 = _mm_unpacklo_epi8(vx0, vzero);
__m128i vacc1 = _mm_unpackhi_epi8(vx0, vzero);
__m128i vacc2 = _mm_unpacklo_epi8(vx1, vzero);
__m128i vacc3 = _mm_unpackhi_epi8(vx1, vzero);
vacc0 = _mm_sub_epi16(vinput_zero_point, vacc0);
vacc1 = _mm_sub_epi16(vinput_zero_point, vacc1);
vacc2 = _mm_sub_epi16(vinput_zero_point, vacc2);
vacc3 = _mm_sub_epi16(vinput_zero_point, vacc3);
vacc0 = _mm_slli_epi16(vacc0, 7);
vacc1 = _mm_slli_epi16(vacc1, 7);
vacc2 = _mm_slli_epi16(vacc2, 7);
vacc3 = _mm_slli_epi16(vacc3, 7);
vacc0 = _mm_mulhrs_epi16(vacc0, vmultiplier);
vacc1 = _mm_mulhrs_epi16(vacc1, vmultiplier);
vacc2 = _mm_mulhrs_epi16(vacc2, vmultiplier);
vacc3 = _mm_mulhrs_epi16(vacc3, vmultiplier);
vacc0 = _mm_adds_epi16(vacc0, voutput_zero_point);
vacc1 = _mm_adds_epi16(vacc1, voutput_zero_point);
vacc2 = _mm_adds_epi16(vacc2, voutput_zero_point);
vacc3 = _mm_adds_epi16(vacc3, voutput_zero_point);
const __m128i vy0 = _mm_packus_epi16(vacc0, vacc1);
const __m128i vy1 = _mm_packus_epi16(vacc2, vacc3);
_mm_storeu_si128((__m128i*) output, vy0);
_mm_storeu_si128((__m128i*) (output + 16), vy1);
output += 32;
}
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
input += 16;
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
const __m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
_mm_storeu_si128((__m128i*) output, vy);
output += 16;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 15 * sizeof(uint8_t));
const __m128i vx = _mm_loadu_si128((const __m128i*) input);
__m128i vacc_lo = _mm_unpacklo_epi8(vx, vzero);
__m128i vacc_hi = _mm_unpackhi_epi8(vx, vzero);
vacc_lo = _mm_sub_epi16(vinput_zero_point, vacc_lo);
vacc_hi = _mm_sub_epi16(vinput_zero_point, vacc_hi);
vacc_lo = _mm_slli_epi16(vacc_lo, 7);
vacc_hi = _mm_slli_epi16(vacc_hi, 7);
vacc_lo = _mm_mulhrs_epi16(vacc_lo, vmultiplier);
vacc_hi = _mm_mulhrs_epi16(vacc_hi, vmultiplier);
vacc_lo = _mm_adds_epi16(vacc_lo, voutput_zero_point);
vacc_hi = _mm_adds_epi16(vacc_hi, voutput_zero_point);
__m128i vy = _mm_packus_epi16(vacc_lo, vacc_hi);
if (batch & (8 * sizeof(uint8_t))) {
_mm_storel_epi64((__m128i*) output, vy);
vy = _mm_unpackhi_epi64(vy, vy);
output += 8;
}
if (batch & (4 * sizeof(uint8_t))) {
unaligned_store_u32(output, (uint32_t) _mm_cvtsi128_si32(vy));
vy = _mm_srli_epi64(vy, 32);
output += 4;
}
uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
if (batch & (2 * sizeof(uint8_t))) {
unaligned_store_u16(output, (uint16_t) vy_lo);
vy_lo >>= 16;
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
*output = (uint8_t) vy_lo;
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmrelaxedsimd-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmrelaxedsimd_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
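  // __builtin_wasm_relaxed_q15mulr_s_i16x8 is the relaxed-SIMD counterpart of
  // wasm_i16x8_q15mulr_sat; the two may differ only when both operands are INT16_MIN,
  // which the inputs here (at most +/-255 shifted left by 7) can never produce.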
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
input += 16;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmrelaxedsimd-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmrelaxedsimd_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
v128_t vacc2 = wasm_u16x8_load8x8(input + 16);
v128_t vacc3 = wasm_u16x8_load8x8(input + 24);
input += 32;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vacc0 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc0, vmultiplier);
vacc1 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc1, vmultiplier);
vacc2 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc2, vmultiplier);
vacc3 = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc3, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmrelaxedsimd-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmrelaxedsimd_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = __builtin_wasm_relaxed_q15mulr_s_i16x8(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmsimd-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmsimd_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
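  // wasm_i16x8_q15mulr_sat is the exact (rounding, saturating) Q15 multiply; the
  // relaxed-SIMD variants of this kernel substitute the relaxed builtin for it.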
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
input += 16;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
wasm_v128_store(output, vy0);
output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmsimd-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmsimd_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
v128_t vacc0 = wasm_u16x8_load8x8(input);
v128_t vacc1 = wasm_u16x8_load8x8(input + 8);
v128_t vacc2 = wasm_u16x8_load8x8(input + 16);
v128_t vacc3 = wasm_u16x8_load8x8(input + 24);
input += 32;
vacc0 = wasm_i16x8_sub(vinput_zero_point, vacc0);
vacc1 = wasm_i16x8_sub(vinput_zero_point, vacc1);
vacc2 = wasm_i16x8_sub(vinput_zero_point, vacc2);
vacc3 = wasm_i16x8_sub(vinput_zero_point, vacc3);
vacc0 = wasm_i16x8_shl(vacc0, 7);
vacc1 = wasm_i16x8_shl(vacc1, 7);
vacc2 = wasm_i16x8_shl(vacc2, 7);
vacc3 = wasm_i16x8_shl(vacc3, 7);
vacc0 = wasm_i16x8_q15mulr_sat(vacc0, vmultiplier);
vacc1 = wasm_i16x8_q15mulr_sat(vacc1, vmultiplier);
vacc2 = wasm_i16x8_q15mulr_sat(vacc2, vmultiplier);
vacc3 = wasm_i16x8_q15mulr_sat(vacc3, vmultiplier);
vacc0 = wasm_i16x8_add_sat(vacc0, voutput_zero_point);
vacc1 = wasm_i16x8_add_sat(vacc1, voutput_zero_point);
vacc2 = wasm_i16x8_add_sat(vacc2, voutput_zero_point);
vacc3 = wasm_i16x8_add_sat(vacc3, voutput_zero_point);
const v128_t vy0 = wasm_u8x16_narrow_i16x8(vacc0, vacc1);
const v128_t vy1 = wasm_u8x16_narrow_i16x8(vacc2, vacc3);
wasm_v128_store(output, vy0);
wasm_v128_store((output + 16), vy1);
output += 32;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vcvt/gen/qu8-vcvt-wasmsimd-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vcvt/wasmsimd.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/common.h>
#include <xnnpack/vcvt.h>
void xnn_qu8_vcvt_ukernel__wasmsimd_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_cvt_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const v128_t vinput_zero_point = wasm_v128_load64_splat(params->wasmsimd.input_zero_point);
const v128_t vmultiplier = wasm_v128_load64_splat(params->wasmsimd.multiplier);
const v128_t voutput_zero_point = wasm_v128_load64_splat(params->wasmsimd.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
input += 8;
const v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
wasm_v128_store64_lane(output, vy, 0);
output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
v128_t vacc = wasm_u16x8_load8x8(input);
vacc = wasm_i16x8_sub(vinput_zero_point, vacc);
vacc = wasm_i16x8_shl(vacc, 7);
vacc = wasm_i16x8_q15mulr_sat(vacc, vmultiplier);
vacc = wasm_i16x8_add_sat(vacc, voutput_zero_point);
v128_t vy = wasm_u8x16_narrow_i16x8(vacc, vacc);
if (batch & (4 * sizeof(uint8_t))) {
wasm_v128_store32_lane(output, vy, 0);
vy = wasm_u64x2_shr(vy, 32);
output += 4;
}
if (batch & (2 * sizeof(uint8_t))) {
wasm_v128_store16_lane(output, vy, 0);
vy = wasm_u32x4_shr(vy, 16);
output += 2;
}
if (batch & (1 * sizeof(uint8_t))) {
wasm_v128_store8_lane(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-neon-x16.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__neon_x16(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
  const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(&params->neon.input_scale_div_exp);
  const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(&params->neon.input_scale_div_mantissa);
  const int16x8_t vscale_ratio = vld1q_dup_s16(&params->neon.scale_ratio);
  const int16x8_t vhalf = vdupq_n_s16(16384);
  const int16x8_t vzero = vdupq_n_s16(0);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
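  // Hard-swish, hswish(x) = x * clamp(x/6 + 1/2, 0, 1), evaluated in Q15 fixed point.
  // `vacc` holds the negated, zero-point-centered input; `vin` computes the negated
  // gate -clamp(x/6 + 1/2, 0, 1), with vhalf = 16384 representing 0.5 and the
  // saturating ops providing the clamps; multiplying the two negated Q15 values in
  // `vout` restores the sign before the output zero point is added with saturation.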
for (; batch >= 16 * sizeof(uint8_t); batch -= 16 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
int16x8_t vin0 = vqdmulhq_s16(vacc0, vinput_scale_div_mantissa);
int16x8_t vin1 = vqdmulhq_s16(vacc1, vinput_scale_div_mantissa);
vin0 = vqshlq_s16(vin0, vinput_scale_div_exp);
vin1 = vqshlq_s16(vin1, vinput_scale_div_exp);
vin0 = vqsubq_s16(vin0, vhalf);
vin1 = vqsubq_s16(vin1, vhalf);
vin0 = vminq_s16(vin0, vzero);
vin1 = vminq_s16(vin1, vzero);
int16x8_t vout0 = vqdmulhq_s16(vacc0, vscale_ratio);
int16x8_t vout1 = vqdmulhq_s16(vacc1, vscale_ratio);
vout0 = vqdmulhq_s16(vout0, vin0);
vout1 = vqdmulhq_s16(vout1, vin1);
vout0 = vqaddq_s16(vout0, voutput_zero_point);
vout1 = vqaddq_s16(vout1, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vout0), vqmovun_s16(vout1));
vst1q_u8(output, vy0); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vout);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vout);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-neon-x32.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__neon_x32(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
  const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(&params->neon.input_scale_div_exp);
  const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(&params->neon.input_scale_div_mantissa);
  const int16x8_t vscale_ratio = vld1q_dup_s16(&params->neon.scale_ratio);
  const int16x8_t vhalf = vdupq_n_s16(16384);
  const int16x8_t vzero = vdupq_n_s16(0);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 32 * sizeof(uint8_t); batch -= 32 * sizeof(uint8_t)) {
const uint8x16_t vx0 = vld1q_u8(input); input += 16;
const uint8x16_t vx1 = vld1q_u8(input); input += 16;
int16x8_t vacc0 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx0)));
int16x8_t vacc1 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx0)));
int16x8_t vacc2 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_low_u8(vx1)));
int16x8_t vacc3 = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vget_high_u8(vx1)));
vacc0 = vshlq_n_s16(vacc0, 7);
vacc1 = vshlq_n_s16(vacc1, 7);
vacc2 = vshlq_n_s16(vacc2, 7);
vacc3 = vshlq_n_s16(vacc3, 7);
int16x8_t vin0 = vqdmulhq_s16(vacc0, vinput_scale_div_mantissa);
int16x8_t vin1 = vqdmulhq_s16(vacc1, vinput_scale_div_mantissa);
int16x8_t vin2 = vqdmulhq_s16(vacc2, vinput_scale_div_mantissa);
int16x8_t vin3 = vqdmulhq_s16(vacc3, vinput_scale_div_mantissa);
vin0 = vqshlq_s16(vin0, vinput_scale_div_exp);
vin1 = vqshlq_s16(vin1, vinput_scale_div_exp);
vin2 = vqshlq_s16(vin2, vinput_scale_div_exp);
vin3 = vqshlq_s16(vin3, vinput_scale_div_exp);
vin0 = vqsubq_s16(vin0, vhalf);
vin1 = vqsubq_s16(vin1, vhalf);
vin2 = vqsubq_s16(vin2, vhalf);
vin3 = vqsubq_s16(vin3, vhalf);
vin0 = vminq_s16(vin0, vzero);
vin1 = vminq_s16(vin1, vzero);
vin2 = vminq_s16(vin2, vzero);
vin3 = vminq_s16(vin3, vzero);
int16x8_t vout0 = vqdmulhq_s16(vacc0, vscale_ratio);
int16x8_t vout1 = vqdmulhq_s16(vacc1, vscale_ratio);
int16x8_t vout2 = vqdmulhq_s16(vacc2, vscale_ratio);
int16x8_t vout3 = vqdmulhq_s16(vacc3, vscale_ratio);
vout0 = vqdmulhq_s16(vout0, vin0);
vout1 = vqdmulhq_s16(vout1, vin1);
vout2 = vqdmulhq_s16(vout2, vin2);
vout3 = vqdmulhq_s16(vout3, vin3);
vout0 = vqaddq_s16(vout0, voutput_zero_point);
vout1 = vqaddq_s16(vout1, voutput_zero_point);
vout2 = vqaddq_s16(vout2, voutput_zero_point);
vout3 = vqaddq_s16(vout3, voutput_zero_point);
const uint8x16_t vy0 = vcombine_u8(vqmovun_s16(vout0), vqmovun_s16(vout1));
const uint8x16_t vy1 = vcombine_u8(vqmovun_s16(vout2), vqmovun_s16(vout3));
vst1q_u8(output, vy0); output += 16;
vst1q_u8(output, vy1); output += 16;
}
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vout);
vst1_u8(output, vy); output += 8;
}
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vout);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}

// XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-neon-x8.c
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/neon.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__neon_x8(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
  const uint16x8_t vinput_zero_point = vld1q_dup_u16(&params->neon.input_zero_point);
  const int16x8_t vinput_scale_div_exp = vld1q_dup_s16(&params->neon.input_scale_div_exp);
  const int16x8_t vinput_scale_div_mantissa = vld1q_dup_s16(&params->neon.input_scale_div_mantissa);
  const int16x8_t vscale_ratio = vld1q_dup_s16(&params->neon.scale_ratio);
  const int16x8_t vhalf = vdupq_n_s16(16384);
  const int16x8_t vzero = vdupq_n_s16(0);
  const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->neon.output_zero_point);
for (; batch >= 8 * sizeof(uint8_t); batch -= 8 * sizeof(uint8_t)) {
const uint8x8_t vx = vld1_u8(input); input += 8;
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
const uint8x8_t vy = vqmovun_s16(vout);
vst1_u8(output, vy); output += 8;
}
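  // Tail: 1-7 elements; compute 8 lanes, then store 4-, 2-, and 1-byte pieces as needed.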
if XNN_UNLIKELY(batch != 0) {
assert(batch >= 1 * sizeof(uint8_t));
assert(batch <= 7 * sizeof(uint8_t));
const uint8x8_t vx = vld1_u8(input);
int16x8_t vacc = vreinterpretq_s16_u16(vsubw_u8(vinput_zero_point, vx));
vacc = vshlq_n_s16(vacc, 7);
int16x8_t vin = vqdmulhq_s16(vacc, vinput_scale_div_mantissa);
vin = vqshlq_s16(vin, vinput_scale_div_exp);
vin = vqsubq_s16(vin, vhalf);
vin = vminq_s16(vin, vzero);
int16x8_t vout = vqdmulhq_s16(vacc, vscale_ratio);
vout = vqdmulhq_s16(vout, vin);
vout = vqaddq_s16(vout, voutput_zero_point);
uint8x8_t vy = vqmovun_s16(vout);
if (batch & (4 * sizeof(uint8_t))) {
vst1_lane_u32((void*) output, vreinterpret_u32_u8(vy), 0); output += 4;
vy = vext_u8(vy, vy, 4);
}
if (batch & (2 * sizeof(uint8_t))) {
vst1_lane_u16((void*) output, vreinterpret_u16_u8(vy), 0); output += 2;
vy = vext_u8(vy, vy, 2);
}
if (batch & (1 * sizeof(uint8_t))) {
vst1_lane_u8(output, vy, 0);
}
}
}
| 2,889 | 35.582278 | 100 | c |

| XNNPACK | XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-scalar-x1.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__scalar_x1(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
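  // Same fixed-point pipeline as the NEON kernels, one element per iteration;
  // explicit shifts and clamps stand in for the saturating vector ops.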
do {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
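    // Apply the power-of-two part of the input scale; a negative exponent
    // becomes an arithmetic right shift.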
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
| 1,662 | 30.377358 | 84 | c |

| XNNPACK | XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-scalar-x2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__scalar_x2(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
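  // Main loop: 2 elements per iteration, the x1 pipeline with the arithmetic unrolled.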
for (; batch >= 2 * sizeof(uint8_t); batch -= 2 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) ((vinput_zero_point - (uint32_t) input[0]) << 7);
int32_t vacc1 = (int32_t) ((vinput_zero_point - (uint32_t) input[1]) << 7);
input += 2;
int32_t vin0 = vacc0 * vinput_scale_div_mantissa;
int32_t vin1 = vacc1 * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin0 <<= vinput_scale_div_exp;
vin1 <<= vinput_scale_div_exp;
} else {
vin0 >>= -vinput_scale_div_exp;
vin1 >>= -vinput_scale_div_exp;
}
vin0 -= 16384;
vin1 -= 16384;
vin0 = math_min_s32(vin0, 0);
vin1 = math_min_s32(vin1, 0);
vin0 = math_max_s32(vin0, -32768);
vin1 = math_max_s32(vin1, -32768);
int32_t vout0 = math_asr_s32(vacc0 * vscale_ratio, 15);
int32_t vout1 = math_asr_s32(vacc1 * vscale_ratio, 15);
vout0 = math_asr_s32(vin0 * vout0, 15) + voutput_zero_point;
vout1 = math_asr_s32(vin1 * vout1, 15) + voutput_zero_point;
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output += 2;
}
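  // Tail: at most 1 element remains for this 2x-unrolled kernel.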
if XNN_UNLIKELY(batch != 0) {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
}
}
| 2,875 | 30.26087 | 84 | c |

| XNNPACK | XNNPACK-master/src/qu8-vhswish/gen/qu8-vhswish-scalar-x4.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-vhswish/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2023 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/vhswish.h>
void xnn_qu8_vhswish_ukernel__scalar_x4(
size_t batch,
const uint8_t* input,
uint8_t* output,
const union xnn_qu8_hswish_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(batch != 0);
assert(batch % sizeof(uint8_t) == 0);
assert(input != NULL);
assert(output != NULL);
const uint32_t vinput_zero_point = (uint32_t) params->scalar.input_zero_point;
const int32_t voutput_zero_point = params->scalar.output_zero_point;
const int32_t vinput_scale_div_mantissa = params->scalar.input_scale_div_mantissa;
const int32_t vinput_scale_div_exp = params->scalar.input_scale_div_exp;
const int32_t vscale_ratio = params->scalar.scale_ratio;
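  // Main loop: 4 elements per iteration, four independent dependency chains.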
for (; batch >= 4 * sizeof(uint8_t); batch -= 4 * sizeof(uint8_t)) {
int32_t vacc0 = (int32_t) ((vinput_zero_point - (uint32_t) input[0]) << 7);
int32_t vacc1 = (int32_t) ((vinput_zero_point - (uint32_t) input[1]) << 7);
int32_t vacc2 = (int32_t) ((vinput_zero_point - (uint32_t) input[2]) << 7);
int32_t vacc3 = (int32_t) ((vinput_zero_point - (uint32_t) input[3]) << 7);
input += 4;
int32_t vin0 = vacc0 * vinput_scale_div_mantissa;
int32_t vin1 = vacc1 * vinput_scale_div_mantissa;
int32_t vin2 = vacc2 * vinput_scale_div_mantissa;
int32_t vin3 = vacc3 * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin0 <<= vinput_scale_div_exp;
vin1 <<= vinput_scale_div_exp;
vin2 <<= vinput_scale_div_exp;
vin3 <<= vinput_scale_div_exp;
} else {
vin0 >>= -vinput_scale_div_exp;
vin1 >>= -vinput_scale_div_exp;
vin2 >>= -vinput_scale_div_exp;
vin3 >>= -vinput_scale_div_exp;
}
vin0 -= 16384;
vin1 -= 16384;
vin2 -= 16384;
vin3 -= 16384;
vin0 = math_min_s32(vin0, 0);
vin1 = math_min_s32(vin1, 0);
vin2 = math_min_s32(vin2, 0);
vin3 = math_min_s32(vin3, 0);
vin0 = math_max_s32(vin0, -32768);
vin1 = math_max_s32(vin1, -32768);
vin2 = math_max_s32(vin2, -32768);
vin3 = math_max_s32(vin3, -32768);
int32_t vout0 = math_asr_s32(vacc0 * vscale_ratio, 15);
int32_t vout1 = math_asr_s32(vacc1 * vscale_ratio, 15);
int32_t vout2 = math_asr_s32(vacc2 * vscale_ratio, 15);
int32_t vout3 = math_asr_s32(vacc3 * vscale_ratio, 15);
vout0 = math_asr_s32(vin0 * vout0, 15) + voutput_zero_point;
vout1 = math_asr_s32(vin1 * vout1, 15) + voutput_zero_point;
vout2 = math_asr_s32(vin2 * vout2, 15) + voutput_zero_point;
vout3 = math_asr_s32(vin3 * vout3, 15) + voutput_zero_point;
vout0 = math_max_s32(vout0, 0);
vout1 = math_max_s32(vout1, 0);
vout2 = math_max_s32(vout2, 0);
vout3 = math_max_s32(vout3, 0);
vout0 = math_min_s32(vout0, 255);
vout1 = math_min_s32(vout1, 255);
vout2 = math_min_s32(vout2, 255);
vout3 = math_min_s32(vout3, 255);
output[0] = (uint8_t) vout0;
output[1] = (uint8_t) vout1;
output[2] = (uint8_t) vout2;
output[3] = (uint8_t) vout3;
output += 4;
}
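  // Tail: up to 3 elements remain; handle them one at a time.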
if XNN_UNLIKELY(batch != 0) {
do {
const int32_t vacc = (int32_t) ((vinput_zero_point - (uint32_t) *input++) << 7);
int32_t vin = vacc * vinput_scale_div_mantissa;
if (vinput_scale_div_exp > 0) {
vin <<= vinput_scale_div_exp;
} else {
vin >>= -vinput_scale_div_exp;
}
vin -= 16384;
vin = math_min_s32(vin, 0);
vin = math_max_s32(vin, -32768);
int32_t vout = math_asr_s32(vacc * vscale_ratio, 15);
vout = math_asr_s32(vin * vout, 15) + voutput_zero_point;
vout = math_max_s32(vout, 0);
vout = math_min_s32(vout, 255);
*output++ = (uint8_t) vout;
batch -= sizeof(uint8_t);
} while (batch != 0);
}
}
| 4,039 | 32.666667 | 86 | c |