| repo (string, 1–152 chars, nullable ⌀) | file (string, 14–221 chars) | code (string, 501–25k chars) | file_length (int64, 501–25k) | avg_line_length (float64, 20–99.5) | max_line_length (int64, 21–134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
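// Note: this microkernel computes a 2x4 tile of the output. The "4c2" weight
// layout packs K in groups of 2 bytes, so kc is rounded up to a multiple of 2
// to match the packed-weight layout.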
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
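// Note: main K loop, 8 input bytes per row per iteration. Inputs are
// zero-extended to 16 bits (unpacklo against zero) and the packed weights have
// the kernel zero point subtracted; _mm_madd_epi16 then multiplies 16-bit
// lanes and sums adjacent pairs, i.e. one "c2" group of 2 K elements per
// 32-bit accumulator lane.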
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
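// Note: remainder path. kc was rounded up to a multiple of 2, so k is 2, 4,
// or 6 here; the 8-byte loads may read past the end of the row, which the
// XNN_OOB_READS annotation on this kernel explicitly permits.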
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
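// Note: fp32 requantization. Accumulators are converted to float, scaled,
// clamped from above (before the zero point is added), rounded back to int32
// by _mm_cvtps_epi32, packed to 16 bits with a saturating zero-point add,
// packed to u8, and finally clamped from below against output_min.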
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 7,861 | 38.31 | 110 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
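// Note: the SSE4.1 ld128 variant widens inputs with _mm_cvtepu8_epi16 and
// loads two 8-byte weight groups per 16-byte load, splitting them with
// unpacklo/unpackhi before the zero-point subtraction.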
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,603 | 37.40404 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
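// Note: the ld64 variant of the same kernel loads one 8-byte weight group at
// a time with _mm_loadl_epi64 and widens it via _mm_cvtepu8_epi16; ld64 vs.
// ld128 is a load-width tuning choice, not a semantic difference.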
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,696 | 37.678392 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
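// Note: wasm_i32x4_dot_i16x8 multiplies 16-bit lanes and sums adjacent pairs
// into 32-bit lanes, playing the same role as _mm_madd_epi16 in the SSE
// variants of this kernel.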
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
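// Note: requantization here uses the magic-bias trick instead of a rounding
// float-to-int conversion: adding a large float bias leaves the rounded
// integer in the low mantissa bits, the i32 max applies the lower output
// clamp, and subtracting magic_bias_less_output_zero_point recovers the
// zero-point-adjusted integer result.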
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,189 | 35.683673 | 134 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,087 | 35.536082 | 134 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
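// Note: the XOP variant replaces the _mm_madd_epi16 + _mm_add_epi32 pair with
// the fused multiply-add _mm_maddd_epi16(a, b, acc), one instruction per
// accumulator update.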
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,472 | 35.99505 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,565 | 36.270936 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
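// Note: the "c2s4" layout avoids per-group broadcast shuffles: the widened
// input vector is rotated by 32 bits (_MM_SHUFFLE(0, 3, 2, 1)) between the
// four 2-element weight groups, which is why kc is rounded up to a multiple
// of 8 for this kernel and no remainder path is needed.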
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,637 | 34.910828 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,730 | 35.272152 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
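      // SSE2 lacks _mm_cvtepu8_epi16, so bytes are zero-extended by interleaving with vzero;
      // this ld128 variant loads 16 weight bytes at once and widens the low and high halves.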
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
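      // Lane extraction without SSE4.1 _mm_extract_epi32: broadcast lane 1 with
      // _mm_shuffle_epi32, then take the low 32 bits with _mm_cvtsi128_si32.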
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,692 | 35.261146 | 110 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
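      // ld64 variant: each 8-byte group of weights is fetched with a separate 64-bit load
      // (four _mm_loadl_epi64 per iteration) instead of two 128-bit loads.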
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,862 | 35.874214 | 110 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
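    // Indirection: a[] supplies one pointer per row per iteration; entries equal to `zero`
    // point at a zero-filled padding buffer and are deliberately not shifted by a_offset.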
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,639 | 34.923567 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,732 | 35.28481 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
size_t k = kc;
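      // wasm_i32x4_dot_i16x8 plays the role of _mm_madd_epi16 here: it multiplies eight
      // 16-bit pairs and sums adjacent products into four 32-bit lanes.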
do {
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
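    // Requantization uses the magic-bias rounding trick: adding the large bias places the
    // rounded integer in the low mantissa bits, the i32x4 max against magic_min doubles as
    // the lower output clamp, and subtracting magic_bias_less_output_zero_point removes the
    // bias while folding in the output zero point.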
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,553 | 33.7125 | 134 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
size_t k = kc;
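      // wasm_u16x8_load8x8 fuses the 8-byte load with zero-extension to 16 bits, so both
      // activations and weights are widened directly from memory in this ld64 variant.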
do {
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,451 | 33.506329 | 134 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
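      // XOP provides _mm_maddd_epi16, a fused multiply-add: it performs the madd and the
      // accumulator add of the plain SSE kernels in a single instruction.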
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,578 | 33.652174 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c2s4-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,671 | 34.012346 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
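    // c8 layout: each output column kept its own accumulator during the K loop; two rounds
    // of _mm_hadd_epi32 now reduce the four per-column partial sums into one int32 per column.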
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,831 | 34.779141 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
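      // k counts upward to kc here; kc was rounded up to a multiple of 8 bytes, so the
      // 8-byte steps always reach kc exactly.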
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,928 | 34.716867 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
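    // SSE2 has no _mm_hadd_epi32, so the per-column accumulators are reduced with
    // unpacklo/unpackhi + add pairs, which transpose and sum the partial results.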
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 6,232
| 37.239264
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
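    // Each output column keeps its own accumulator vector: the bias sits in
    // lane 0, and _mm_madd_epi16 spreads partial sums across all four lanes,
    // which the horizontal reduction after the K loop collapses again.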
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
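    // Note the asymmetric clamping: the upper bound is applied in float before
    // conversion, while the lower bound is applied as a single uint8 max after
    // packing, since _mm_packus_epi16 already saturates to [0, 255].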
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 6,406
| 37.365269
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
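      // SSE4.1 widens the activations with _mm_cvtepu8_epi16; the weights
      // still go through unpacklo/unpackhi against vzero because both halves
      // of each 128-bit weight load are needed.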
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
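    // With SSSE3+ available, the horizontal reduction uses _mm_hadd_epi32: two
    // rounds of pairwise adds collapse each row's four partial-sum vectors
    // into a single vector of column sums.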
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,833
| 34.791411
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
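      // In this ld64 variant both activations and weights are widened with
      // _mm_cvtepu8_epi16, so no explicit zero vector is needed.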
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,930
| 34.728916
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
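      // The XOP inner loop uses _mm_maddd_epi16(a, b, acc), a fused
      // multiply-add-accumulate that replaces the _mm_madd_epi16 +
      // _mm_add_epi32 pair of the plain SSE paths.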
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,772
| 33.568862
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x4c8-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,869
| 33.529412
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x8-minmax-rndnu-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_2x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
  const uint8x8_t vb_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(uint8_t)) {
const uint8x8_t va0 = vld1_u8(a0); a0 += 8;
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t va1 = vld1_u8(a1); a1 += 8;
const int16x8_t vxa1 = vreinterpretq_s16_u16(vmovl_u8(va1));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c7 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
k -= 8 * sizeof(uint8_t);
}
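      // Remainder: handle 1-7 leftover K positions. The 8-byte activation
      // loads may read past the end of the row (the kernel is annotated
      // XNN_OOB_READS), and the nested ifs below consume one weight row per
      // remaining position.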
if XNN_UNLIKELY(k != 0) {
const uint8x8_t va0 = vld1_u8(a0); a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t va1 = vld1_u8(a1); a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vreinterpretq_s16_u16(vmovl_u8(va1));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
if (k >= 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
if (k > 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
if (k >= 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
if (k > 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
if (k >= 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
if (k > 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
}
}
}
}
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
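    // rndnu requantization: a saturating pre-shift, a saturating doubling
    // high-half multiply by the fixed-point multiplier, then a rounding
    // post-shift. The shift amounts are stored negated, so the left-shift
    // intrinsics below shift right.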
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,776
| 53.78125
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x8c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_2x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
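  // Note: despite its name, va_zero_point holds the broadcast *kernel* zero
  // point; it is dotted with the activations below to accumulate the
  // correction term kernel_zero_point * sum(a) into vnacc.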
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
      // 2x partially unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 2x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
        // Load an 8x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 2x8 * 8x8 --> 2x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 2x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
// Load a 4x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 2x4 * 4x8 --> 2x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
}
p -= 2 * sizeof(void*);
} while (p != 0);
    // Subtract the kernel-zero-point correction: vpacc holds sum(a*b) and
    // vnacc holds kernel_zero_point * sum(a), so the difference equals
    // sum(a * (b - kernel_zero_point)).
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,832
| 42.945274
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-2x8c8-minmax-fp32-avx2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx8c8-avx2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_2x8c8__avx2(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]);
const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]);
__m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]);
const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]);
__m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
__m256i vacc1x01 = vacc0x01;
__m256i vacc1x23 = vacc0x23;
__m256i vacc1x45 = vacc0x45;
__m256i vacc1x67 = vacc0x67;
w = (const int32_t*) w + 8;
size_t p = ks;
const __m256i vb_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.kernel_zero_point);
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
const __m256i vxa0 = _mm256_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1));
const __m256i vxa1 = _mm256_cvtepu8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m256i vxb01 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb01), vb_zero_point);
vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m256i vxb23 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb23), vb_zero_point);
vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23));
const __m128i vb45 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 32));
const __m256i vxb45 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb45), vb_zero_point);
vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45));
const __m128i vb67 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 48));
const __m256i vxb67 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb67), vb_zero_point);
vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67));
w = (const void*) ((const uint8_t*) w + 64);
k += 8 * sizeof(uint8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
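    // _mm256_hadd_epi32 adds pairs within each 128-bit lane, so after two
    // rounds the eight column sums sit in the interleaved order
    // 0,2,4,6,1,3,5,7; the permutevar below restores ascending column order.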
const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
__m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
__m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask);
__m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
__m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567);
const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale);
vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point);
vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
__m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point);
vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0));
__m256i vout = _mm256_packus_epi16(vacc01x01234567, vacc01x01234567);
vout = _mm256_max_epu8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));
__m128i vout_lo = _mm256_castsi256_si128(vout);
__m128i vout_hi = _mm256_extracti128_si256(vout, 1);
if (nc >= 8) {
_mm_storel_epi64((__m128i*) c1, vout_hi);
_mm_storel_epi64((__m128i*) c0, vout_lo);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_si32(c1, vout_hi);
_mm_storeu_si32(c0, vout_lo);
c1 += 4;
c0 += 4;
vout_lo = _mm_srli_epi64(vout_lo, 32);
vout_hi = _mm_srli_epi64(vout_hi, 32);
}
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0));
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));
c1 += 2;
c0 += 2;
vout_lo = _mm_srli_epi32(vout_lo, 16);
vout_hi = _mm_srli_epi32(vout_hi, 16);
}
if (nc & 1) {
*c1 = (uint8_t) _mm_extract_epi8(vout_hi, 0);
*c0 = (uint8_t) _mm_extract_epi8(vout_lo, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,725
| 38.020202
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x16c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_3x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
// weight matrix, at the start of the group of 16 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc1x89AB = vpacc0x89AB;
uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc2x89AB = vpacc0x89AB;
uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
// Inner accumulation loop along the 16 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 3x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
        // Load an 8x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 3x8 * 8x16 --> 3x16.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 3x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
// Load a 4x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 3x4 * 4x16 --> 3x16.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
}
p -= 3 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
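    // vnacc* hold kernel_zero_point * sum(activations) for each row, so the
    // subtraction turns the unsigned dot products into sums of a * (b - b_zero_point).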
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
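    // Requantize (rndnu): truncating pre-shift, saturating doubling
    // multiply-high by the fixed-point multiplier, then a rounding post-shift.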
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
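    // Add the output zero point in int16 and narrow to uint8 with saturation.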
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
uint8x8_t vout2x01234567 = vget_low_u8(vout2x0123456789ABCDEF);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
if (nc & 8) {
        vst1_u8(c2, vout2x01234567); c2 += 8;
vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
vout2x01234567 = vget_high_u8(vout2x0123456789ABCDEF);
vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
}
if (nc & 4) {
vst1_lane_u32((void*) c2, vreinterpret_u32_u8(vout2x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c2, vreinterpret_u16_u8(vout2x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c2, vout2x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x2-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
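    // Float "magic bias" rounding: adding the bias pushes the value's integer
    // part into the low mantissa bits, so reinterpreting the float as int32 and
    // subtracting magic_bias_less_output_zero_point yields the rounded result
    // with the output zero point already added.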
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x2-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
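    // imagic variant: after the magic-bias add all values are positive, so
    // float ordering matches the ordering of the bit patterns and the output
    // clamp can be applied to the reinterpreted int32 values.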
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout2x0 = math_max_s32(vout2x0, vmagic_min);
vout2x1 = math_max_s32(vout2x1, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout2x0 = math_min_s32(vout2x0, vmagic_max);
vout2x1 = math_min_s32(vout2x1, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout2x0 -= vmagic_bias_less_zero_point;
vout2x1 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x2-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
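    // lrintf rounds using the current FP rounding mode
    // (round-to-nearest-even by default).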
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x2-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x2__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
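    // __builtin_wasm_max_f32/__builtin_wasm_min_f32 lower directly to the
    // WebAssembly f32.max/f32.min instructions.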
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x2-minmax-rndnu-scalar.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_3x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
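    // Requantize (rndnu): widen each accumulator to 64 bits, multiply by the
    // fixed-point multiplier, add the rounding constant, and arithmetic-shift right.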
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
vout1x0 += voutput_zero_point;
vout1x1 += voutput_zero_point;
vout2x0 += voutput_zero_point;
vout2x1 += voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
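      // Partial tile: store two columns, move column 2 into the column-0
      // slot, then store the single remaining column if nc is odd.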
if (nc & 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
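      // a[] holds indirection pointers; an entry equal to `zero` points at the
      // zero buffer used for padding and must not be adjusted by a_offset.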
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2);
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout1x2 = math_max_s32(vout1x2, vmagic_min);
vout1x3 = math_max_s32(vout1x3, vmagic_min);
vout2x0 = math_max_s32(vout2x0, vmagic_min);
vout2x1 = math_max_s32(vout2x1, vmagic_min);
vout2x2 = math_max_s32(vout2x2, vmagic_min);
vout2x3 = math_max_s32(vout2x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout1x2 = math_min_s32(vout1x2, vmagic_max);
vout1x3 = math_min_s32(vout1x3, vmagic_max);
vout2x0 = math_min_s32(vout2x0, vmagic_max);
vout2x1 = math_min_s32(vout2x1, vmagic_max);
vout2x2 = math_min_s32(vout2x2, vmagic_max);
vout2x3 = math_min_s32(vout2x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout1x2 -= vmagic_bias_less_zero_point;
vout1x3 -= vmagic_bias_less_zero_point;
vout2x0 -= vmagic_bias_less_zero_point;
vout2x1 -= vmagic_bias_less_zero_point;
vout2x2 -= vmagic_bias_less_zero_point;
vout2x3 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
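// A minimal standalone sketch (not XNNPACK code) of the "magic bias"
// float-to-int rounding used by the imagic/fmagic requantization paths
// above. Adding 0x1.8p+23f forces the integer part of the value into the
// low mantissa bits, so reinterpreting the float's bit pattern yields the
// round-to-nearest-even integer plus a constant bias that is subtracted
// back out (the kernels fold the output zero point into that subtraction).
// All names below are illustrative, not part of the XNNPACK API.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t float_bits(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof(u));  // bit-exact reinterpretation without UB
  return u;
}

int main(void) {
  const float vmagic_bias = 12582912.0f;  // 0x1.8p+23
  const float x = 42.3f;
  const int32_t rounded =
      (int32_t) float_bits(x + vmagic_bias) - (int32_t) float_bits(vmagic_bias);
  printf("%d\n", rounded);  // prints 42 (round-to-nearest-even)
  return 0;
}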
| 8,525 | 32.304688 | 102 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
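    // Requantization, fp32 "lrintf" variant: scale in float, clamp against
    // the output bounds (precomputed relative to the output zero point),
    // round with lrintf(), which rounds to nearest-even in the default
    // rounding mode, and finally add the output zero point.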
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 8,795 | 35.197531 | 100 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
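    // Requantization, fp32 "fmagic" variant: clamping happens in the float
    // domain (the __builtin_wasm_min_f32/__builtin_wasm_max_f32 builtins
    // lower to the Wasm f32.min/f32.max instructions), rounding uses the
    // magic-bias bit trick, and the bias and output zero point are removed
    // in a single integer subtraction.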
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 9,260 | 37.111111 | 116 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4-minmax-rndnu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_3x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const int32_t vb_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
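    // Requantization, "rndnu" variant: widen each accumulator to 64 bits
    // with math_mulext_s32, add a precomputed rounding term, and shift
    // right arithmetically (round-to-nearest, ties rounded up), then clamp
    // and add the output zero point.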
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const int64_t vextacc0x2 = math_mulext_s32(vacc0x2, vmultiplier) + vrounding;
const int64_t vextacc0x3 = math_mulext_s32(vacc0x3, vmultiplier) + vrounding;
const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
const int64_t vextacc1x2 = math_mulext_s32(vacc1x2, vmultiplier) + vrounding;
const int64_t vextacc1x3 = math_mulext_s32(vacc1x3, vmultiplier) + vrounding;
const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
const int64_t vextacc2x2 = math_mulext_s32(vacc2x2, vmultiplier) + vrounding;
const int64_t vextacc2x3 = math_mulext_s32(vacc2x3, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
int32_t vout0x2 = (int32_t) math_asr_s64(vextacc0x2, vshift);
int32_t vout0x3 = (int32_t) math_asr_s64(vextacc0x3, vshift);
int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
int32_t vout1x2 = (int32_t) math_asr_s64(vextacc1x2, vshift);
int32_t vout1x3 = (int32_t) math_asr_s64(vextacc1x3, vshift);
int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
int32_t vout2x2 = (int32_t) math_asr_s64(vextacc2x2, vshift);
int32_t vout2x3 = (int32_t) math_asr_s64(vextacc2x3, vshift);
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
vout0x2 += voutput_zero_point;
vout0x3 += voutput_zero_point;
vout1x0 += voutput_zero_point;
vout1x1 += voutput_zero_point;
vout1x2 += voutput_zero_point;
vout1x3 += voutput_zero_point;
vout2x0 += voutput_zero_point;
vout2x1 += voutput_zero_point;
vout2x2 += voutput_zero_point;
vout2x3 += voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
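// A minimal standalone sketch (not XNNPACK code) of the rndnu requantization
// above: the int32 accumulator is multiplied by a fixed-point multiplier in
// 64-bit arithmetic, a rounding term of 2**(shift - 1) is added, and the sum
// is arithmetic-shifted right, i.e. rounded to nearest with ties rounded up.
// The multiplier/shift values below are made up for illustration.
#include <stdint.h>
#include <stdio.h>

static int32_t requantize_rndnu(int32_t acc, int32_t multiplier, uint32_t shift) {
  const int64_t rounding = INT64_C(1) << (shift - 1);
  // Right shift of a signed 64-bit value; for negative sums this relies on
  // arithmetic-shift semantics, as XNNPACK's math_asr_s64 does.
  return (int32_t) (((int64_t) acc * (int64_t) multiplier + rounding) >> shift);
}

int main(void) {
  // Scale by ~0.75 using a Q31-style multiplier with a shift of 31.
  printf("%d\n", requantize_rndnu(1000, 1610612736, 31));  // prints 750
  return 0;
}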
| 8,825 | 37.207792 | 96 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
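      // Remainder: kc was rounded up to a multiple of 2 bytes, so 2, 4 or 6
      // input bytes may remain here; they are handled in up to three more
      // 2-element (c2) steps.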
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
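    // fp32 requantization in SIMD: convert to float, scale, and clamp the
    // upper bound while still in float; _mm_cvtps_epi32 then rounds to
    // nearest-even. Saturating packs narrow the rows, the output zero point
    // is added with saturation, and the lower bound is enforced on the
    // packed bytes with _mm_max_epu8.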
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,724 | 40.033755 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
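        // ld64 variant: weights are fetched 64 bits at a time with
        // _mm_loadl_epi64 and widened via _mm_cvtepu8_epi16, instead of the
        // 128-bit loads plus unpacklo/unpackhi splits of the ld128 variant.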
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,817 | 40.252101 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
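      // SSE2 lacks _mm_cvtepu8_epi16, so uint8 values are zero-extended to
      // 16 bits by interleaving with a zero vector (_mm_unpacklo_epi8).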
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
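    // SSE2 also lacks _mm_extract_epi32, so each row's 4 output bytes are
    // shuffled into lane 0 and read out with _mm_cvtsi128_si32.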
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 9,870 | 40.649789 | 110 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
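    // Requantize: convert the int32 accumulators to float, apply the scale, and clamp from above before rounding back to int32; the lower bound is applied after packing.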
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
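    // Add the output zero point and narrow int32 -> int16 -> uint8 with saturation, then enforce the lower output bound.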
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
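      // LD128: each 16-byte weight load covers two k-pairs; the low and high halves are zero-extended via unpack with vzero and the kernel zero point subtracted.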
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
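      // LD64: weights are loaded 8 bytes at a time and zero-extended with SSE4.1 _mm_cvtepu8_epi16, so no separate vzero vector is needed.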
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
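      // wasm_i32x4_dot_i16x8 multiplies i16 lanes pairwise and adds adjacent products into i32 lanes, the WAsm SIMD counterpart of SSE2's _mm_madd_epi16.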
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
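    // Requantize with the magic-bias trick: after scaling, adding the large power-of-two bias leaves the rounded integer in the low mantissa bits; the signed max against magic_min doubles as the lower output clamp, and subtracting magic_bias_less_output_zero_point removes the bias and adds the output zero point in one step.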
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc22x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
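      // LD64 variant: each weight block is widened 8 bytes at a time with wasm_u16x8_load8x8 before the zero-point subtraction.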
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc22x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
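      // XOP's _mm_maddd_epi16 fuses the pairwise multiply-add with the accumulator addition, replacing the madd+add instruction pair used in the SSE paths.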
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
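      // No vzero vector is needed here: byte inputs are widened with _mm_cvtepu8_epi16 and accumulated via the fused XOP multiply-add.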
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

// XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
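      // C2S4 layout: each iteration consumes 8 k-bytes per row with no broadcast; instead, the activation vector is rotated by one 32-bit lane after each multiply-add so every weight block sees the matching k-pair.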
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
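    // Requantization note (reading of the generated code below): the int32
    // accumulators take the fp32 path — convert to float, multiply by the
    // per-tensor scale, clamp against (output_max - zero_point) while still in
    // float, then convert back with round-to-nearest. The lower bound is
    // applied after packing, via the saturating add of the zero point and the
    // unsigned byte max against output_min.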
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
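      // Remainder columns: each row's 4 output bytes sit in one 32-bit lane of
      // vout (row 0 in lane 0, row 1 in lane 1, row 2 in lane 2). Two bytes per
      // row are stored first, then the vector is shifted right by 16 bits so a
      // final odd byte lands in the low byte of each row's lane.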
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,160 | 37.5 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
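        // Inner-loop layout (one reading of the "4c2s4" kernel name): each
        // iteration consumes 8 input channels per row (kc was rounded up to 8).
        // Every 8-byte weight group holds 4 output columns x 2 input channels;
        // after each madd the activation vector is rotated down one 32-bit lane
        // (the 4-byte "s4" shift) so the next 2-channel pair lines up.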
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,253 | 37.791444 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
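      // SSE2 has no pmovzx, so uint8 values are zero-extended by interleaving
      // with a zero vector. This ld128 variant loads 16 weight bytes per
      // _mm_loadu_si128 and splits them into low/high halves below.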
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 7,264 | 38.05914 | 110 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
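    // Store path note: SSE2 lacks _mm_extract_epi32 and _mm_extract_epi8, so
    // each row's 4 output bytes are first moved to the low lane with a shuffle
    // before _mm_cvtsi128_si32, and single remainder bytes are taken as the
    // low byte of 16-bit extracts.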
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 7,434 | 38.547872 | 110 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
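  // Row-pointer fixup: when mr < 3, the out-of-range output pointers alias the
  // previous row, so those rows are redundantly recomputed into already-valid
  // memory rather than branched around inside the hot loops.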
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,162 | 37.510753 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
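      // Weights are stored as uint8; each 8-byte group below is widened to
      // 16 bits and has the kernel zero point subtracted up front, so the
      // subsequent multiply-adds operate on correctly signed values.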
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,255 | 37.802139 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
size_t k = kc;
do {
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
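    // Magic-bias requantization (as suggested by the parameter names below):
    // adding the 2**23 magic bias makes the low mantissa bits of each float
    // equal the rounded integer; the signed i32 max against magic_min enforces
    // the output minimum, and subtracting magic_bias_less_output_zero_point
    // removes the bias and adds the output zero point in a single step.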
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc22x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,996 | 36.021164 | 134 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
size_t k = kc;
do {
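        // wasm_i32x4_dot_i16x8 is the WebAssembly analogue of SSE's pmaddwd:
        // it multiplies adjacent signed 16-bit pairs and sums each pair into a
        // 32-bit lane. This ld64 variant loads each 8-byte weight group with a
        // separate widening load rather than one 16-byte load.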
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc22x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 6,894 | 35.871658 | 134 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
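        // XOP's _mm_maddd_epi16 fuses the pmaddwd multiply-add with the
        // accumulator add into one instruction, saving an _mm_add_epi32 per
        // multiply-accumulate relative to the SSE variants of this kernel.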
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,045 | 36.084211 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c2s4-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
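// XOP intrinsics are exposed through <x86intrin.h> on GCC/Clang but through
// <intrin.h> on MSVC, hence the conditional include above.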
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 3 * sizeof(void*);
} while (p != 0);
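    // fp32 requantization: convert the int32 accumulators to float, scale,
    // clamp from above against (output_max - zero_point), round back to int32,
    // add the zero point with saturating 16-bit adds, then pack to unsigned
    // 8-bit and apply the lower clamp.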
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,138 | 36.376963 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
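    // Reduce the four per-column c8 accumulators to one vector per row: two
    // rounds of _mm_hadd_epi32 sum adjacent lanes, leaving column n of row m
    // in lane n of vaccMx0123.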
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,415 | 37.030769 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
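      // ld64 variant: weights are fetched 8 bytes at a time with
      // _mm_loadl_epi64 and widened via _mm_cvtepu8_epi16, instead of the
      // ld128 variant's single 16-byte load split into two halves.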
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,512 | 36.944444 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
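    // SSE2 lacks _mm_hadd_epi32 (an SSSE3 instruction), so the per-column
    // accumulators are reduced with an unpack/add butterfly instead:
    // interleave the low/high 32-bit halves and add, twice, which transposes
    // and sums in one pass.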
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 8,038 | 40.225641 | 119 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
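    // SSE2 has no _mm_extract_epi32/_mm_extract_epi8, so rows 1 and 2 are
    // stored by shuffling the target lane down to lane 0 and reading it with
    // _mm_cvtsi128_si32; the single-byte tails reuse _mm_extract_epi16.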
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 8,212 | 40.271357 | 119 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
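      // ld128: each 16-byte weight load covers two c8 groups; the halves are
      // zero-extended with unpacklo/unpackhi against vzero, while the 8-byte
      // activation loads use _mm_cvtepu8_epi16 directly.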
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,417 | 37.041026 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,514 | 36.954545 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
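      // XOP's _mm_maddd_epi16(a, b, acc) fuses the _mm_madd_epi16 multiply-add
      // of 16-bit pairs with the int32 accumulator add into one instruction.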
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,300 | 35.688442 | 108 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x4c8-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
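// XOP intrinsics are exposed through <intrin.h> on MSVC and <x86intrin.h>
// on GCC/Clang, hence the conditional include above.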
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
__m128i vacc2x0 = vacc0x0;
__m128i vacc2x1 = vacc0x1;
__m128i vacc2x2 = vacc0x2;
__m128i vacc2x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);
w = (const void*) ((const uint8_t*) w + 32);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc22x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
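    // vout byte layout is [row0 c0-3 | row1 c0-3 | row2 c0-3 | row2 c0-3],
    // which determines the extract lane indices in the stores below.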
if (nc >= 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,397
| 35.623762
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x8-minmax-rndnu-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_3x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const uint8x8_t vb_zero_point = vld1_dup_u8(¶ms->rndnu_neon.kernel_zero_point[0]);
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
int32x4_t vacc2x0123 = vacc0x0123;
int32x4_t vacc2x4567 = vacc0x4567;
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = kc;
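      // Main loop: 8 input channels per iteration. Activations are widened to
      // int16 and broadcast one lane at a time into vmlal_lane_s16, which
      // widens the 16-bit products into the 32-bit accumulators.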
while (k >= 8 * sizeof(uint8_t)) {
const uint8x8_t va0 = vld1_u8(a0); a0 += 8;
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t va1 = vld1_u8(a1); a1 += 8;
const int16x8_t vxa1 = vreinterpretq_s16_u16(vmovl_u8(va1));
const uint8x8_t va2 = vld1_u8(a2); a2 += 8;
const int16x8_t vxa2 = vreinterpretq_s16_u16(vmovl_u8(va2));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
const uint8x8_t vb01234567c7 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c7 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c7, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
k -= 8 * sizeof(uint8_t);
}
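      // Remainder: handle the final 1-7 input channels one at a time; the
      // nested conditions replay the per-lane sequence of the main loop.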
if XNN_UNLIKELY(k != 0) {
const uint8x8_t va0 = vld1_u8(a0); a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vreinterpretq_s16_u16(vmovl_u8(va0));
const uint8x8_t va1 = vld1_u8(a1); a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vreinterpretq_s16_u16(vmovl_u8(va1));
const uint8x8_t va2 = vld1_u8(a2); a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const int16x8_t vxa2 = vreinterpretq_s16_u16(vmovl_u8(va2));
const uint8x8_t vb01234567c0 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c0 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c0, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
if (k >= 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c1 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c1 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c1, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
if (k > 2 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c2 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c2 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c2, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
if (k >= 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c3 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c3 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c3, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
if (k > 4 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c4 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c4 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c4, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
if (k >= 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c5 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c5 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c5, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
if (k > 6 * sizeof(uint8_t)) {
const uint8x8_t vb01234567c6 = vld1_u8(w); w = (const void*) ((const uint8_t*) w + 8);
const int16x8_t vxb01234567c6 = vreinterpretq_s16_u16(vsubl_u8(vb01234567c6, vb_zero_point));
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
}
}
}
}
}
}
}
p -= 3 * sizeof(void*);
} while (p != 0);
    // Requantize the accumulators (rndnu): saturating right pre-shift,
    // rounding-doubling multiply by the fixed-point multiplier, then rounding
    // right post-shift, before the output zero point is added.
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x8_t vout2x01234567 = vqmovun_s16(vacc2x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x8_t vout2x01234567 = vqmovun_s16(vacc2x01234567);
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567 = vmax_u8(vout2x01234567, vget_low_u8(voutput_min));
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567 = vmin_u8(vout2x01234567, vget_low_u8(voutput_max));
if (nc >= 8) {
vst1_u8(c2 + 0, vout2x01234567);
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c2, vreinterpret_u32_u8(vout2x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c2, vreinterpret_u16_u8(vout2x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c2, vout2x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 20,901
| 58.212465
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x8c4-minmax-rndnu-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_3x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 8 bias values are loaded from the
// weight matrix, at the start of the group of 8 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
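    // vnaccN accumulates the kernel-zero-point correction for row N,
    // dot(kernel_zero_point, activations), which is subtracted from the
    // packed accumulators once the reduction loops finish.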
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
      // Main loop, unrolled 2x: load 8 bytes (two groups of 4 input channels) at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 3x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
        // Load an 8x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 3x8 * 8x8 --> 3x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
      // Handle the final block of 4 input channels; kc is rounded up to a
      // multiple of 4, so any remainder is exactly 4 bytes.
if XNN_UNLIKELY(k != 0) {
// Load a 3x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
// Load a 4x8 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 3x4 * 4x8 --> 3x8.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
}
p -= 3 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
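    // Requantize (rndnu): right pre-shift, rounding-doubling multiply by the
    // fixed-point multiplier, then rounding right post-shift.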
const int32x4_t vright_pre_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_pre_shift);
const int32x4_t vmultiplier = vld1q_dup_s32(¶ms->rndnu_neon.multiplier);
const int32x4_t vright_post_shift = vld1q_dup_s32(¶ms->rndnu_neon.right_post_shift);
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc1x01234567);
uint8x8_t vout2x01234567 = vqmovun_s16(vacc2x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc1x01234567));
uint8x8_t vout2x01234567 = vqmovun_s16(vacc2x01234567);
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->rndnu_neon.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->rndnu_neon.output_max);
vout0x01234567_1x01234567 = vmaxq_u8(vout0x01234567_1x01234567, voutput_min);
vout2x01234567 = vmax_u8(vout2x01234567, vget_low_u8(voutput_min));
vout0x01234567_1x01234567 = vminq_u8(vout0x01234567_1x01234567, voutput_max);
vout2x01234567 = vmin_u8(vout2x01234567, vget_low_u8(voutput_max));
if (nc >= 8) {
vst1_u8(c2 + 0, vout2x01234567);
vst1_u8(c1 + 0, vget_high_u8(vout0x01234567_1x01234567));
vst1_u8(c0 + 0, vget_low_u8(vout0x01234567_1x01234567));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c2, vreinterpret_u32_u8(vout2x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c2, vreinterpret_u16_u8(vout2x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567 = vext_u8(vout2x01234567, vout2x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1_lane_u8(c2, vout2x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,592
| 46.318367
| 130
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-3x8c8-minmax-fp32-avx2.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx8c8-avx2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_3x8c8__avx2(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 3);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (3 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
do {
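    // Initialize accumulators with bias: 8 bias values are loaded from the
    // weight matrix; each __m256i pairs two columns' biases, one per 128-bit
    // lane, to match the per-lane c8 reduction below.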
const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]);
const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]);
__m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]);
const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]);
__m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
__m256i vacc1x01 = vacc0x01;
__m256i vacc1x23 = vacc0x23;
__m256i vacc1x45 = vacc0x45;
__m256i vacc1x67 = vacc0x67;
__m256i vacc2x01 = vacc0x01;
__m256i vacc2x23 = vacc0x23;
__m256i vacc2x45 = vacc0x45;
__m256i vacc2x67 = vacc0x67;
w = (const int32_t*) w + 8;
size_t p = ks;
const __m256i vb_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.kernel_zero_point);
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
a += 3;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
const __m256i vxa0 = _mm256_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1));
const __m256i vxa1 = _mm256_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a2));
const __m256i vxa2 = _mm256_cvtepu8_epi16(va2);
a2 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m256i vxb01 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb01), vb_zero_point);
vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01));
vacc2x01 = _mm256_add_epi32(vacc2x01, _mm256_madd_epi16(vxa2, vxb01));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m256i vxb23 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb23), vb_zero_point);
vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23));
vacc2x23 = _mm256_add_epi32(vacc2x23, _mm256_madd_epi16(vxa2, vxb23));
const __m128i vb45 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 32));
const __m256i vxb45 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb45), vb_zero_point);
vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45));
vacc2x45 = _mm256_add_epi32(vacc2x45, _mm256_madd_epi16(vxa2, vxb45));
const __m128i vb67 = _mm_load_si128((const __m128i*) ((const uint8_t*) w + 48));
const __m256i vxb67 = _mm256_sub_epi16(_mm256_cvtepu8_epi16(vb67), vb_zero_point);
vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67));
vacc2x67 = _mm256_add_epi32(vacc2x67, _mm256_madd_epi16(vxa2, vxb67));
w = (const void*) ((const uint8_t*) w + 64);
k += 8 * sizeof(uint8_t);
}
p -= 3 * sizeof(void*);
} while (p != 0);
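    // Reduce: each accumulator holds two columns' partial sums, one column per
    // 128-bit lane. Two rounds of in-lane hadd leave the columns interleaved
    // (0246 in the low halves, 1357 in the high halves), and the cross-lane
    // permute restores 0..7 order.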
const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);
const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);
const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
__m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
__m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask);
__m256i vacc2x01234567 = _mm256_permutevar8x32_epi32(vacc2x02461357, vpermute_mask);
__m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
__m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567);
__m256 vscaled2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567);
const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale);
vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale);
vscaled2x01234567 = _mm256_mul_ps(vscaled2x01234567, vscale);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point);
vscaled2x01234567 = _mm256_min_ps(vscaled2x01234567, voutput_max_less_zero_point);
vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567);
vacc2x01234567 = _mm256_cvtps_epi32(vscaled2x01234567);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
__m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point);
__m256i vacc22x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc2x01234567, vacc2x01234567), voutput_zero_point);
vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0));
vacc22x01234567 = _mm256_permute4x64_epi64(vacc22x01234567, _MM_SHUFFLE(3, 1, 2, 0));
__m256i vout = _mm256_packus_epi16(vacc01x01234567, vacc22x01234567);
vout = _mm256_max_epu8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));
__m128i vout_lo = _mm256_castsi256_si128(vout);
__m128i vout_hi = _mm256_extracti128_si256(vout, 1);
if (nc >= 8) {
_mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo));
_mm_storel_epi64((__m128i*) c1, vout_hi);
_mm_storel_epi64((__m128i*) c0, vout_lo);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2));
_mm_storeu_si32(c1, vout_hi);
_mm_storeu_si32(c0, vout_lo);
c2 += 4;
c1 += 4;
c0 += 4;
vout_lo = _mm_srli_epi64(vout_lo, 32);
vout_hi = _mm_srli_epi64(vout_hi, 32);
}
if (nc & 2) {
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4));
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0));
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));
c2 += 2;
c1 += 2;
c0 += 2;
vout_lo = _mm_srli_epi32(vout_lo, 16);
vout_hi = _mm_srli_epi32(vout_hi, 16);
}
if (nc & 1) {
*c2 = (uint8_t) _mm_extract_epi8(vout_lo, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout_hi, 0);
*c0 = (uint8_t) _mm_extract_epi8(vout_lo, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,739
| 40.623932
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x16c4-minmax-fp32-neondot.c
|
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const uint8x8_t va_zero_point = vld1_dup_u8(¶ms->fp32_neonv8.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
// weight matrix, at the start of the group of 16 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc1x89AB = vpacc0x89AB;
uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc2x89AB = vpacc0x89AB;
uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
uint32x4_t vpacc3x0123 = vpacc0x0123;
uint32x4_t vpacc3x4567 = vpacc0x4567;
uint32x4_t vpacc3x89AB = vpacc0x89AB;
uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
uint32x2_t vnacc3 = vmov_n_u32(0);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
// Inner accumulation loop along the 16 columns.
size_t k = kc;
      // Main loop, unrolled 2x: load 8 bytes (two groups of 4 input channels) at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 4x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
        // Load an 8x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x8 * 8x16 --> 4x16.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
      // Handle the final block of 4 input channels; kc is rounded up to a
      // multiple of 4, so any remainder is exactly 4 bytes.
if XNN_UNLIKELY(k != 0) {
// Load a 4x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
// Load a 4x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x4 * 4x16 --> 4x16.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
vnacc3 = vpadd_u32(vnacc3, vnacc3);
const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));
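    // fp32 requantization: convert to float, scale, and round back to int32
    // with round-to-nearest-even (vcvtnq_s32_f32).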
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc0x89AB = vcvtq_f32_s32(vacc0x89AB);
float32x4_t vfpacc0xCDEF = vcvtq_f32_s32(vacc0xCDEF);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
float32x4_t vfpacc1x89AB = vcvtq_f32_s32(vacc1x89AB);
float32x4_t vfpacc1xCDEF = vcvtq_f32_s32(vacc1xCDEF);
float32x4_t vfpacc2x0123 = vcvtq_f32_s32(vacc2x0123);
float32x4_t vfpacc2x4567 = vcvtq_f32_s32(vacc2x4567);
float32x4_t vfpacc2x89AB = vcvtq_f32_s32(vacc2x89AB);
float32x4_t vfpacc2xCDEF = vcvtq_f32_s32(vacc2xCDEF);
float32x4_t vfpacc3x0123 = vcvtq_f32_s32(vacc3x0123);
float32x4_t vfpacc3x4567 = vcvtq_f32_s32(vacc3x4567);
float32x4_t vfpacc3x89AB = vcvtq_f32_s32(vacc3x89AB);
float32x4_t vfpacc3xCDEF = vcvtq_f32_s32(vacc3xCDEF);
const float32x4_t vscale = vld1q_dup_f32(¶ms->fp32_neonv8.scale);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale);
vfpacc0x89AB = vmulq_f32(vfpacc0x89AB, vscale);
vfpacc0xCDEF = vmulq_f32(vfpacc0xCDEF, vscale);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale);
vfpacc1x89AB = vmulq_f32(vfpacc1x89AB, vscale);
vfpacc1xCDEF = vmulq_f32(vfpacc1xCDEF, vscale);
vfpacc2x0123 = vmulq_f32(vfpacc2x0123, vscale);
vfpacc2x4567 = vmulq_f32(vfpacc2x4567, vscale);
vfpacc2x89AB = vmulq_f32(vfpacc2x89AB, vscale);
vfpacc2xCDEF = vmulq_f32(vfpacc2xCDEF, vscale);
vfpacc3x0123 = vmulq_f32(vfpacc3x0123, vscale);
vfpacc3x4567 = vmulq_f32(vfpacc3x4567, vscale);
vfpacc3x89AB = vmulq_f32(vfpacc3x89AB, vscale);
vfpacc3xCDEF = vmulq_f32(vfpacc3xCDEF, vscale);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc0x89AB = vcvtnq_s32_f32(vfpacc0x89AB);
vacc0xCDEF = vcvtnq_s32_f32(vfpacc0xCDEF);
vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
vacc1x89AB = vcvtnq_s32_f32(vfpacc1x89AB);
vacc1xCDEF = vcvtnq_s32_f32(vfpacc1xCDEF);
vacc2x0123 = vcvtnq_s32_f32(vfpacc2x0123);
vacc2x4567 = vcvtnq_s32_f32(vfpacc2x4567);
vacc2x89AB = vcvtnq_s32_f32(vfpacc2x89AB);
vacc2xCDEF = vcvtnq_s32_f32(vfpacc2xCDEF);
vacc3x0123 = vcvtnq_s32_f32(vfpacc3x0123);
vacc3x4567 = vcvtnq_s32_f32(vfpacc3x4567);
vacc3x89AB = vcvtnq_s32_f32(vfpacc3x89AB);
vacc3xCDEF = vcvtnq_s32_f32(vfpacc3xCDEF);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
#endif
const uint8x16_t voutput_min = vld1q_dup_u8(¶ms->fp32_neonv8.output_min);
const uint8x16_t voutput_max = vld1q_dup_u8(¶ms->fp32_neonv8.output_max);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
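      // Partial store: the low halves of two output rows are packed into one
      // 128-bit register (rows 2/3 and rows 0/1), so each nc & {8,4,2,1}
      // branch below can peel lanes for two rows at a time.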
if (nc & 8) {
vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
}
if (nc & 4) {
vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,368 | 55.681698 | 131 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x16c4-minmax-rndnu-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qu8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_4x16c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
  const uint8x8_t va_zero_point = vld1_dup_u8(&params->rndnu_neon.kernel_zero_point[0]);
do {
// Initialize accumulators with bias. 16 bias values are loaded from the
// weight matrix, at the start of the group of 16 columns.
uint32x4_t vpacc0x0123 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x4567 = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0x89AB = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc0xCDEF = vld1q_u32(w); w = (const void*) ((const uint32_t*) w + 4);
uint32x4_t vpacc1x0123 = vpacc0x0123;
uint32x4_t vpacc1x4567 = vpacc0x4567;
uint32x4_t vpacc1x89AB = vpacc0x89AB;
uint32x4_t vpacc1xCDEF = vpacc0xCDEF;
uint32x4_t vpacc2x0123 = vpacc0x0123;
uint32x4_t vpacc2x4567 = vpacc0x4567;
uint32x4_t vpacc2x89AB = vpacc0x89AB;
uint32x4_t vpacc2xCDEF = vpacc0xCDEF;
uint32x4_t vpacc3x0123 = vpacc0x0123;
uint32x4_t vpacc3x4567 = vpacc0x4567;
uint32x4_t vpacc3x89AB = vpacc0x89AB;
uint32x4_t vpacc3xCDEF = vpacc0xCDEF;
uint32x2_t vnacc0 = vmov_n_u32(0);
uint32x2_t vnacc1 = vmov_n_u32(0);
uint32x2_t vnacc2 = vmov_n_u32(0);
uint32x2_t vnacc3 = vmov_n_u32(0);
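    // vpacc* accumulate the raw uint8 dot products a*b, while vnacc* accumulate
    // kernel_zero_point * a (va_zero_point holds the weight zero point despite
    // the "va" prefix); the difference, taken after the K loop, equals
    // a * (b - kernel_zero_point).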
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
      // Inner accumulation loop over the K dimension, producing 16 output columns.
size_t k = kc;
      // Loop unrolled 2x to load 8 bytes of activations per row at a time.
while (k >= 8 * sizeof(uint8_t)) {
// Load a 4x8 block of activations.
const uint8x8_t va0x01234567 = vld1_u8(a0); a0 += 8;
const uint8x8_t va1x01234567 = vld1_u8(a1); a1 += 8;
const uint8x8_t va2x01234567 = vld1_u8(a2); a2 += 8;
const uint8x8_t va3x01234567 = vld1_u8(a3); a3 += 8;
        // Load an 8x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb4567xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x8 * 8x16 --> 4x16.
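        // Each vdotq_lane_u32 dot-products one 4-byte group of a row's
        // activations (lane 0 = bytes 0..3, lane 1 = bytes 4..7) against a
        // 4(K)x4(N) tile of weights, accumulating four uint32 column sums.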
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb4567x0123, va0x01234567, 1);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb4567x4567, va0x01234567, 1);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb4567x89AB, va0x01234567, 1);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb4567x0123, va1x01234567, 1);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb4567x4567, va1x01234567, 1);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb4567x89AB, va1x01234567, 1);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb4567x0123, va2x01234567, 1);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb4567x4567, va2x01234567, 1);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb4567x89AB, va2x01234567, 1);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb4567x0123, va3x01234567, 1);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb4567x4567, va3x01234567, 1);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb4567x89AB, va3x01234567, 1);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb4567xCDEF, va3x01234567, 1);
k -= 8 * sizeof(uint8_t);
}
// Handle up to 4 final positions of `k`
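      // kc was rounded up to a multiple of 4, so at this point k is either 0
      // or exactly 4 bytes per row; a single 32-bit lane load covers it.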
if XNN_UNLIKELY(k != 0) {
// Load a 4x4 block of activations.
const uint8x8_t va0x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a0, vmov_n_u32(0), 0)); a0 += 4;
const uint8x8_t va1x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a1, vmov_n_u32(0), 0)); a1 += 4;
const uint8x8_t va2x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a2, vmov_n_u32(0), 0)); a2 += 4;
const uint8x8_t va3x01234567 = vreinterpret_u8_u32(vld1_lane_u32((const void*) a3, vmov_n_u32(0), 0)); a3 += 4;
// Load a 4x16 block of weights.
const uint8x16_t vb0123x0123 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x4567 = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123x89AB = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
const uint8x16_t vb0123xCDEF = vld1q_u8(w); w = (const void*) ((const uint8_t*) w + 16);
// Multiply-accumulate: 4x4 * 4x16 --> 4x16.
vnacc0 = vdot_u32(vnacc0, va_zero_point, va0x01234567);
vpacc0x0123 = vdotq_lane_u32(vpacc0x0123, vb0123x0123, va0x01234567, 0);
vpacc0x4567 = vdotq_lane_u32(vpacc0x4567, vb0123x4567, va0x01234567, 0);
vpacc0x89AB = vdotq_lane_u32(vpacc0x89AB, vb0123x89AB, va0x01234567, 0);
vpacc0xCDEF = vdotq_lane_u32(vpacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
vnacc1 = vdot_u32(vnacc1, va_zero_point, va1x01234567);
vpacc1x0123 = vdotq_lane_u32(vpacc1x0123, vb0123x0123, va1x01234567, 0);
vpacc1x4567 = vdotq_lane_u32(vpacc1x4567, vb0123x4567, va1x01234567, 0);
vpacc1x89AB = vdotq_lane_u32(vpacc1x89AB, vb0123x89AB, va1x01234567, 0);
vpacc1xCDEF = vdotq_lane_u32(vpacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
vnacc2 = vdot_u32(vnacc2, va_zero_point, va2x01234567);
vpacc2x0123 = vdotq_lane_u32(vpacc2x0123, vb0123x0123, va2x01234567, 0);
vpacc2x4567 = vdotq_lane_u32(vpacc2x4567, vb0123x4567, va2x01234567, 0);
vpacc2x89AB = vdotq_lane_u32(vpacc2x89AB, vb0123x89AB, va2x01234567, 0);
vpacc2xCDEF = vdotq_lane_u32(vpacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
vnacc3 = vdot_u32(vnacc3, va_zero_point, va3x01234567);
vpacc3x0123 = vdotq_lane_u32(vpacc3x0123, vb0123x0123, va3x01234567, 0);
vpacc3x4567 = vdotq_lane_u32(vpacc3x4567, vb0123x4567, va3x01234567, 0);
vpacc3x89AB = vdotq_lane_u32(vpacc3x89AB, vb0123x89AB, va3x01234567, 0);
vpacc3xCDEF = vdotq_lane_u32(vpacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
}
p -= 4 * sizeof(void*);
} while (p != 0);
// Subtract zero point from accumulators.
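    // The dot products above used the raw weights b, so each accumulator holds
    // sum(a*b) while vnacc* holds kernel_zero_point * sum(a); the subtraction
    //   acc = vpacc - vnacc = sum(a * (b - kernel_zero_point))
    // recovers the zero-point-corrected product in signed 32-bit form.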
vnacc0 = vpadd_u32(vnacc0, vnacc0);
const uint32x4_t vnacc0x0123 = vcombine_u32(vnacc0, vnacc0);
int32x4_t vacc0x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x0123, vnacc0x0123));
int32x4_t vacc0x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc0x4567, vnacc0x0123));
int32x4_t vacc0x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc0x89AB, vnacc0x0123));
int32x4_t vacc0xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc0xCDEF, vnacc0x0123));
vnacc1 = vpadd_u32(vnacc1, vnacc1);
const uint32x4_t vnacc1x0123 = vcombine_u32(vnacc1, vnacc1);
int32x4_t vacc1x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x0123, vnacc1x0123));
int32x4_t vacc1x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc1x4567, vnacc1x0123));
int32x4_t vacc1x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc1x89AB, vnacc1x0123));
int32x4_t vacc1xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc1xCDEF, vnacc1x0123));
vnacc2 = vpadd_u32(vnacc2, vnacc2);
const uint32x4_t vnacc2x0123 = vcombine_u32(vnacc2, vnacc2);
int32x4_t vacc2x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x0123, vnacc2x0123));
int32x4_t vacc2x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc2x4567, vnacc2x0123));
int32x4_t vacc2x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc2x89AB, vnacc2x0123));
int32x4_t vacc2xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc2xCDEF, vnacc2x0123));
vnacc3 = vpadd_u32(vnacc3, vnacc3);
const uint32x4_t vnacc3x0123 = vcombine_u32(vnacc3, vnacc3);
int32x4_t vacc3x0123 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x0123, vnacc3x0123));
int32x4_t vacc3x4567 = vreinterpretq_s32_u32(vsubq_u32(vpacc3x4567, vnacc3x0123));
int32x4_t vacc3x89AB = vreinterpretq_s32_u32(vsubq_u32(vpacc3x89AB, vnacc3x0123));
int32x4_t vacc3xCDEF = vreinterpretq_s32_u32(vsubq_u32(vpacc3xCDEF, vnacc3x0123));
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);
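    // rndnu requantization: vshlq_s32 applies the pre-shift (a negative count
    // shifts right), vqdmulhq_s32 takes the high 32 bits of the doubling
    // product (in effect (acc * multiplier) >> 31), and vrshlq_s32 applies a
    // rounding right shift by the post-shift amount.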
vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);
vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);
vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc0x01234567), vacc0x89ABCDEF);
uint8x16_t vout1x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc1x01234567), vacc1x89ABCDEF);
uint8x16_t vout2x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc2x01234567), vacc2x89ABCDEF);
uint8x16_t vout3x0123456789ABCDEF = vqmovun_high_s16(vqmovun_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);
uint8x16_t vout0x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc0x01234567), vqmovun_s16(vacc0x89ABCDEF));
uint8x16_t vout1x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc1x01234567), vqmovun_s16(vacc1x89ABCDEF));
uint8x16_t vout2x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc2x01234567), vqmovun_s16(vacc2x89ABCDEF));
uint8x16_t vout3x0123456789ABCDEF = vcombine_u8(vqmovun_s16(vacc3x01234567), vqmovun_s16(vacc3x89ABCDEF));
#endif
    const uint8x16_t voutput_min = vld1q_dup_u8(&params->rndnu_neon.output_min);
    const uint8x16_t voutput_max = vld1q_dup_u8(&params->rndnu_neon.output_max);
vout0x0123456789ABCDEF = vmaxq_u8(vout0x0123456789ABCDEF, voutput_min);
vout1x0123456789ABCDEF = vmaxq_u8(vout1x0123456789ABCDEF, voutput_min);
vout2x0123456789ABCDEF = vmaxq_u8(vout2x0123456789ABCDEF, voutput_min);
vout3x0123456789ABCDEF = vmaxq_u8(vout3x0123456789ABCDEF, voutput_min);
vout0x0123456789ABCDEF = vminq_u8(vout0x0123456789ABCDEF, voutput_max);
vout1x0123456789ABCDEF = vminq_u8(vout1x0123456789ABCDEF, voutput_max);
vout2x0123456789ABCDEF = vminq_u8(vout2x0123456789ABCDEF, voutput_max);
vout3x0123456789ABCDEF = vminq_u8(vout3x0123456789ABCDEF, voutput_max);
if (nc >= 16) {
vst1q_u8(c3 + 0, vout3x0123456789ABCDEF);
vst1q_u8(c2 + 0, vout2x0123456789ABCDEF);
vst1q_u8(c1 + 0, vout1x0123456789ABCDEF);
vst1q_u8(c0 + 0, vout0x0123456789ABCDEF);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 16;
} else {
uint8x16_t vout2x01234567_3x01234567 = vcombine_u8(vget_low_u8(vout2x0123456789ABCDEF), vget_low_u8(vout3x0123456789ABCDEF));
uint8x16_t vout0x01234567_1x01234567 = vcombine_u8(vget_low_u8(vout0x0123456789ABCDEF), vget_low_u8(vout1x0123456789ABCDEF));
if (nc & 8) {
vst1_u8(c3, vget_high_u8(vout2x01234567_3x01234567)); c3 += 8;
vst1_u8(c2, vget_low_u8(vout2x01234567_3x01234567)); c2 += 8;
vst1_u8(c1, vget_high_u8(vout0x01234567_1x01234567)); c1 += 8;
vst1_u8(c0, vget_low_u8(vout0x01234567_1x01234567)); c0 += 8;
vout2x01234567_3x01234567 = vcombine_u8(vget_high_u8(vout2x0123456789ABCDEF), vget_high_u8(vout3x0123456789ABCDEF));
vout0x01234567_1x01234567 = vcombine_u8(vget_high_u8(vout0x0123456789ABCDEF), vget_high_u8(vout1x0123456789ABCDEF));
}
if (nc & 4) {
vst1q_lane_u32((void*) c3, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 2); c3 += 4;
vst1q_lane_u32((void*) c2, vreinterpretq_u32_u8(vout2x01234567_3x01234567), 0); c2 += 4;
vst1q_lane_u32((void*) c1, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_u8(vout0x01234567_1x01234567), 0); c0 += 4;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c3, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 4); c3 += 2;
vst1q_lane_u16((void*) c2, vreinterpretq_u16_u8(vout2x01234567_3x01234567), 0); c2 += 2;
vst1q_lane_u16((void*) c1, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_u8(vout0x01234567_1x01234567), 0); c0 += 2;
vout2x01234567_3x01234567 = vextq_u8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
vout0x01234567_1x01234567 = vextq_u8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_u8(c3, vout2x01234567_3x01234567, 8);
vst1q_lane_u8(c2, vout2x01234567_3x01234567, 0);
vst1q_lane_u8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_u8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 21,788 | 56.490765 | 131 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x2-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
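    // fmagic rounding: adding the magic bias (typically 2**23 + 2**22 as a
    // float) forces the rounded integer into the low mantissa bits, so
    // reinterpreting the bits below and subtracting
    // magic_bias_less_output_zero_point yields the requantized value with the
    // output zero point already folded in.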
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 7,215 | 34.372549 | 116 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x2-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
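    // imagic variant: the magic bias is added before clamping, so magic_min /
    // magic_max express the output bounds in the biased integer domain and the
    // final subtraction of magic_bias_less_zero_point removes the bias while
    // adding the output zero point.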
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0);
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout2x0 = math_max_s32(vout2x0, vmagic_min);
vout2x1 = math_max_s32(vout2x1, vmagic_min);
vout3x0 = math_max_s32(vout3x0, vmagic_min);
vout3x1 = math_max_s32(vout3x1, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout2x0 = math_min_s32(vout2x0, vmagic_max);
vout2x1 = math_min_s32(vout2x1, vmagic_max);
vout3x0 = math_min_s32(vout3x0, vmagic_max);
vout3x1 = math_min_s32(vout3x1, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout2x0 -= vmagic_bias_less_zero_point;
vout2x1 -= vmagic_bias_less_zero_point;
vout3x0 -= vmagic_bias_less_zero_point;
vout3x1 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 6,858 | 31.201878 | 102 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x2-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
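    // lrintf rounds to nearest (ties to even under the default rounding mode);
    // the clamps above guarantee the result fits in int32, so the cast from
    // long cannot overflow.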
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 7,036 | 33.495098 | 100 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x2-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x2__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);
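    // __builtin_wasm_max_f32 / __builtin_wasm_min_f32 lower to the WebAssembly
    // f32.max / f32.min instructions; apart from that, this kernel follows the
    // same fmagic requantization as the scalar variant above.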
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 7,373 | 35.147059 | 116 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x2-minmax-rndnu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_4x2__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
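    // rndnu on scalar: widen to 64 bits so the product cannot overflow, add a
    // pre-computed rounding term (typically 1 << (shift - 1)), then
    // arithmetic-shift right:  out = (acc * multiplier + rounding) >> shift.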
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
vout1x0 += voutput_zero_point;
vout1x1 += voutput_zero_point;
vout2x0 += voutput_zero_point;
vout2x1 += voutput_zero_point;
vout3x0 += voutput_zero_point;
vout3x1 += voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 7,082 | 35.137755 | 96 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4-minmax-fp32-scalar-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
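    // This 4x4 variant loads the bias with direct int32 indexing: it consumes
    // 4 weight bytes per k step, so w stays 4-byte aligned, whereas the 4x2
    // kernels above advance w by 2 bytes per step and must use
    // unaligned_indexed_load_s32.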
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;
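    // Editorial note: the magic-bias addition above forces each clamped value
    // into the binade [2^23, 2^24), where a float's low mantissa bits equal
    // its round-to-nearest integer value; subtracting
    // magic_bias_less_output_zero_point from the raw bits therefore yields
    // round(vfpacc) + output_zero_point with no explicit float-to-int
    // conversion.  A standalone sketch follows this kernel.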
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c3[2] = (uint8_t) vout3x2;
c3[3] = (uint8_t) vout3x3;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
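      // Editorial note: vout?x2 is moved into vout?x0 above so that the
      // nc & 1 case below stores the third remaining column through the same
      // vout?x0 path used for a single-column remainder.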
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
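// --- Editorial sketch (not generated code) ---
// A minimal, self-contained illustration of the fmagic rounding used in the
// kernel above, assuming the usual magic bias 0x1.8p23f and a hypothetical
// output zero point of 128; all names below are illustrative only.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t demo_float_as_uint32(float f) {
  uint32_t u;
  memcpy(&u, &f, sizeof(u));  // reinterpret the bits, no numeric conversion
  return u;
}

int main(void) {
  const float vmagic_bias = 0x1.8p23f;    // 12582912.0f
  const int32_t output_zero_point = 128;  // hypothetical
  const int32_t vmagic_bias_less_output_zero_point =
      (int32_t) demo_float_as_uint32(vmagic_bias) - output_zero_point;
  float vfpacc = 3.4f;    // stands in for a clamped, scaled accumulator
  vfpacc += vmagic_bias;  // rounding now happens in the mantissa bits
  const int32_t vout = (int32_t) demo_float_as_uint32(vfpacc)
      - vmagic_bias_less_output_zero_point;
  printf("%d\n", (int) vout);  // prints 131, i.e. round(3.4) + 128
  return 0;
}
// --- End editorial sketch ---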
| 11,262 | 37.179661 | 116 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4-minmax-fp32-scalar-imagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_imagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_imagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2);
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3);
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0);
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1);
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2);
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout1x2 = math_max_s32(vout1x2, vmagic_min);
vout1x3 = math_max_s32(vout1x3, vmagic_min);
vout2x0 = math_max_s32(vout2x0, vmagic_min);
vout2x1 = math_max_s32(vout2x1, vmagic_min);
vout2x2 = math_max_s32(vout2x2, vmagic_min);
vout2x3 = math_max_s32(vout2x3, vmagic_min);
vout3x0 = math_max_s32(vout3x0, vmagic_min);
vout3x1 = math_max_s32(vout3x1, vmagic_min);
vout3x2 = math_max_s32(vout3x2, vmagic_min);
vout3x3 = math_max_s32(vout3x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout1x2 = math_min_s32(vout1x2, vmagic_max);
vout1x3 = math_min_s32(vout1x3, vmagic_max);
vout2x0 = math_min_s32(vout2x0, vmagic_max);
vout2x1 = math_min_s32(vout2x1, vmagic_max);
vout2x2 = math_min_s32(vout2x2, vmagic_max);
vout2x3 = math_min_s32(vout2x3, vmagic_max);
vout3x0 = math_min_s32(vout3x0, vmagic_max);
vout3x1 = math_min_s32(vout3x1, vmagic_max);
vout3x2 = math_min_s32(vout3x2, vmagic_max);
vout3x3 = math_min_s32(vout3x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout1x2 -= vmagic_bias_less_zero_point;
vout1x3 -= vmagic_bias_less_zero_point;
vout2x0 -= vmagic_bias_less_zero_point;
vout2x1 -= vmagic_bias_less_zero_point;
vout2x2 -= vmagic_bias_less_zero_point;
vout2x3 -= vmagic_bias_less_zero_point;
vout3x0 -= vmagic_bias_less_zero_point;
vout3x1 -= vmagic_bias_less_zero_point;
vout3x2 -= vmagic_bias_less_zero_point;
vout3x3 -= vmagic_bias_less_zero_point;
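    // Editorial note: in this "imagic" variant the clamp ran on the
    // magic-biased bit patterns (vmagic_min/vmagic_max are precomputed as the
    // bit patterns of magic_bias + bound), which is sound because
    // non-negative IEEE-754 floats order the same as their bit patterns;
    // subtracting magic_bias_less_zero_point above finishes both rounding and
    // re-biasing in one step.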
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c3[2] = (uint8_t) vout3x2;
c3[3] = (uint8_t) vout3x3;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 10,625 | 33.057692 | 102 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4-minmax-fp32-scalar-lrintf.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_lrintf.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_lrintf.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;
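    // Editorial note: lrintf() rounds using the current FP rounding mode,
    // round-to-nearest-even by default, matching the _mm_cvtps_epi32
    // behaviour of the SIMD kernels; because the clamp already happened in
    // float against the bounds minus the zero point, the int32 results above
    // fit [output_min, output_max] once the zero point is added.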
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c3[2] = (uint8_t) vout3x2;
c3[3] = (uint8_t) vout3x3;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 10,987 | 36.247458 | 100 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4-minmax-fp32-wasm-fmagic.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->fp32_scalar_fmagic.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
float vfpacc2x0 = (float) vacc2x0;
float vfpacc2x1 = (float) vacc2x1;
float vfpacc2x2 = (float) vacc2x2;
float vfpacc2x3 = (float) vacc2x3;
float vfpacc3x0 = (float) vacc3x0;
float vfpacc3x1 = (float) vacc3x1;
float vfpacc3x2 = (float) vacc3x2;
float vfpacc3x3 = (float) vacc3x3;
const float vscale = params->fp32_scalar_fmagic.scale;
vfpacc0x0 *= vscale;
vfpacc0x1 *= vscale;
vfpacc0x2 *= vscale;
vfpacc0x3 *= vscale;
vfpacc1x0 *= vscale;
vfpacc1x1 *= vscale;
vfpacc1x2 *= vscale;
vfpacc1x3 *= vscale;
vfpacc2x0 *= vscale;
vfpacc2x1 *= vscale;
vfpacc2x2 *= vscale;
vfpacc2x3 *= vscale;
vfpacc3x0 *= vscale;
vfpacc3x1 *= vscale;
vfpacc3x2 *= vscale;
vfpacc3x3 *= vscale;
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point);
vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point);
vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point);
vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);
vfpacc3x2 = __builtin_wasm_max_f32(vfpacc3x2, voutput_min_less_zero_point);
vfpacc3x3 = __builtin_wasm_max_f32(vfpacc3x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point);
vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point);
vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point);
vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);
vfpacc3x2 = __builtin_wasm_min_f32(vfpacc3x2, voutput_max_less_zero_point);
vfpacc3x3 = __builtin_wasm_min_f32(vfpacc3x3, voutput_max_less_zero_point);
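    // Editorial note: __builtin_wasm_{min,max}_f32 lower to the single Wasm
    // f32.min / f32.max instructions instead of the compare-and-select
    // sequence that math_min_f32 / math_max_f32 would produce; for the
    // finite, non-NaN values flowing through here the results are identical.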
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
vfpacc2x0 += vmagic_bias;
vfpacc2x1 += vmagic_bias;
vfpacc2x2 += vmagic_bias;
vfpacc2x3 += vmagic_bias;
vfpacc3x0 += vmagic_bias;
vfpacc3x1 += vmagic_bias;
vfpacc3x2 += vmagic_bias;
vfpacc3x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c3[2] = (uint8_t) vout3x2;
c3[3] = (uint8_t) vout3x3;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 11,580 | 38.257627 | 116 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4-minmax-rndnu-scalar.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qu8_igemm_minmax_rndnu_ukernel_4x4__scalar(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
const int32_t vb_zero_point = params->rndnu_scalar.kernel_zero_point;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
int32_t vacc2x0 = vacc0x0;
int32_t vacc2x1 = vacc0x1;
int32_t vacc2x2 = vacc0x2;
int32_t vacc2x3 = vacc0x3;
int32_t vacc3x0 = vacc0x0;
int32_t vacc3x1 = vacc0x1;
int32_t vacc3x2 = vacc0x2;
int32_t vacc3x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
assert(a2 != NULL);
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
assert(a3 != NULL);
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
do {
const int32_t va0 = (int32_t) (uint32_t) *a0++;
const int32_t va1 = (int32_t) (uint32_t) *a1++;
const int32_t va2 = (int32_t) (uint32_t) *a2++;
const int32_t va3 = (int32_t) (uint32_t) *a3++;
const int32_t vb0 = (int32_t) (uint32_t) ((const uint8_t*) w)[0] - vb_zero_point;
const int32_t vb1 = (int32_t) (uint32_t) ((const uint8_t*) w)[1] - vb_zero_point;
const int32_t vb2 = (int32_t) (uint32_t) ((const uint8_t*) w)[2] - vb_zero_point;
const int32_t vb3 = (int32_t) (uint32_t) ((const uint8_t*) w)[3] - vb_zero_point;
w = (const void*) ((const uint8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
vacc2x0 += va2 * vb0;
vacc2x1 += va2 * vb1;
vacc2x2 += va2 * vb2;
vacc2x3 += va2 * vb3;
vacc3x0 += va3 * vb0;
vacc3x1 += va3 * vb1;
vacc3x2 += va3 * vb2;
vacc3x3 += va3 * vb3;
k -= sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
const int32_t vmultiplier = params->rndnu_scalar.multiplier;
const int64_t vrounding = params->rndnu_scalar.rounding;
const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
const int64_t vextacc0x2 = math_mulext_s32(vacc0x2, vmultiplier) + vrounding;
const int64_t vextacc0x3 = math_mulext_s32(vacc0x3, vmultiplier) + vrounding;
const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
const int64_t vextacc1x2 = math_mulext_s32(vacc1x2, vmultiplier) + vrounding;
const int64_t vextacc1x3 = math_mulext_s32(vacc1x3, vmultiplier) + vrounding;
const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
const int64_t vextacc2x2 = math_mulext_s32(vacc2x2, vmultiplier) + vrounding;
const int64_t vextacc2x3 = math_mulext_s32(vacc2x3, vmultiplier) + vrounding;
const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;
const int64_t vextacc3x2 = math_mulext_s32(vacc3x2, vmultiplier) + vrounding;
const int64_t vextacc3x3 = math_mulext_s32(vacc3x3, vmultiplier) + vrounding;
const uint32_t vshift = params->rndnu_scalar.shift;
int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
int32_t vout0x2 = (int32_t) math_asr_s64(vextacc0x2, vshift);
int32_t vout0x3 = (int32_t) math_asr_s64(vextacc0x3, vshift);
int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
int32_t vout1x2 = (int32_t) math_asr_s64(vextacc1x2, vshift);
int32_t vout1x3 = (int32_t) math_asr_s64(vextacc1x3, vshift);
int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
int32_t vout2x2 = (int32_t) math_asr_s64(vextacc2x2, vshift);
int32_t vout2x3 = (int32_t) math_asr_s64(vextacc2x3, vshift);
int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);
int32_t vout3x2 = (int32_t) math_asr_s64(vextacc3x2, vshift);
int32_t vout3x3 = (int32_t) math_asr_s64(vextacc3x3, vshift);
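    // Editorial note: rndnu requantization widens each accumulator to 64
    // bits, multiplies by a fixed-point multiplier, adds a rounding term
    // (1 << (shift - 1) in the usual parameter setup), and arithmetic-shifts
    // right.  Worked example, assuming multiplier = 0x40000000 (0.5 in Q31)
    // and shift = 31: vacc = 5 gives
    // (5 * 0x40000000 + 0x40000000) >> 31 = 3, i.e. 2.5 rounded to nearest
    // with ties going up, hence the name.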
const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
vout3x2 = math_max_s32(vout3x2, voutput_min_less_zero_point);
vout3x3 = math_max_s32(vout3x3, voutput_min_less_zero_point);
const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
vout3x2 = math_min_s32(vout3x2, voutput_max_less_zero_point);
vout3x3 = math_min_s32(vout3x3, voutput_max_less_zero_point);
const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
vout0x0 += voutput_zero_point;
vout0x1 += voutput_zero_point;
vout0x2 += voutput_zero_point;
vout0x3 += voutput_zero_point;
vout1x0 += voutput_zero_point;
vout1x1 += voutput_zero_point;
vout1x2 += voutput_zero_point;
vout1x3 += voutput_zero_point;
vout2x0 += voutput_zero_point;
vout2x1 += voutput_zero_point;
vout2x2 += voutput_zero_point;
vout2x3 += voutput_zero_point;
vout3x0 += voutput_zero_point;
vout3x1 += voutput_zero_point;
vout3x2 += voutput_zero_point;
vout3x3 += voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
c3[2] = (uint8_t) vout3x2;
c3[3] = (uint8_t) vout3x3;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
c2[2] = (uint8_t) vout2x2;
c2[3] = (uint8_t) vout2x3;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
c1[2] = (uint8_t) vout1x2;
c1[3] = (uint8_t) vout1x3;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
c0[2] = (uint8_t) vout0x2;
c0[3] = (uint8_t) vout0x3;
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c3[0] = (uint8_t) vout3x0;
c3[1] = (uint8_t) vout3x1;
vout3x0 = vout3x2;
c3 += 2;
c2[0] = (uint8_t) vout2x0;
c2[1] = (uint8_t) vout2x1;
vout2x0 = vout2x2;
c2 += 2;
c1[0] = (uint8_t) vout1x0;
c1[1] = (uint8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (uint8_t) vout0x0;
c0[1] = (uint8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c3[0] = (uint8_t) vout3x0;
c2[0] = (uint8_t) vout2x0;
c1[0] = (uint8_t) vout1x0;
c0[0] = (uint8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 11,001 | 38.433692 | 96 | c |
| XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
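      // Editorial note for the remainder below: kc was rounded up to a
      // multiple of 2, so k here is 0, 2, 4, or 6 bytes.  The 8-byte loads
      // from a0..a3 may read past the valid input (hence XNN_OOB_READS), but
      // the stray lanes never reach the accumulators: each guarded step
      // broadcasts only the k-pair it owns via _mm_shuffle_epi32.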
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
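    // Editorial note: _mm_packs_epi32 narrows the accumulators to int16 with
    // signed saturation, _mm_adds_epi16 re-biases by the output zero point
    // (also saturating), and _mm_packus_epi16 narrows to uint8 with unsigned
    // saturation, which already enforces the 255 ceiling; output_max was
    // applied in float via _mm_min_ps, so only output_min still needs the
    // explicit _mm_max_epu8 above.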
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,746 | 41.716364 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
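  // When mr < 4, the unused row pointers alias a lower row so that stores for
  // the extra rows land on valid memory instead of past the output buffer.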
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
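      // Indirection buffer: each row pointer either references real input
      // (rebased by a_offset) or the shared zero buffer used for padding.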
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
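      // Main loop: consume K in blocks of 8 bytes, i.e. four 2-element (c2)
      // slices; _mm_madd_epi16 forms the paired dot products while
      // _mm_shuffle_epi32 broadcasts the matching activation pair.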
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
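      // Remainder: kc was rounded up to a multiple of 2, so 2, 4, or 6 bytes
      // of K may remain; handle them as up to three more 2-byte slices.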
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,839 | 41.898551 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
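      // SSE2 has no _mm_cvtepu8_epi16, so bytes are widened to 16 bits by
      // interleaving with zero via _mm_unpacklo_epi8/_mm_unpackhi_epi8.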
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 += 8;
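        // ld128: a single 16-byte load provides two 8-byte weight slices.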
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
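    // SSE2 lacks _mm_extract_epi32, so each row's 4 output bytes are obtained
    // by broadcasting the lane with _mm_shuffle_epi32 and reading the low
    // 32 bits with _mm_cvtsi128_si32.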
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3))));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi16(vout, 6);
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 11,947 | 42.447273 | 110 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
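      // ld64 variant: each 8-byte weight slice is fetched with its own
      // _mm_loadl_epi64 instead of pairing two slices per 16-byte load.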
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3))));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi16(vout, 6);
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 12,117 | 42.747292 | 110 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
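      // SSE4.1 widens activations with _mm_cvtepu8_epi16, but vzero is still
      // needed to unpack both halves of each 16-byte weight load below.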
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,748 | 41.723636 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc2x0123 = _mm_add_epi32(vacc2x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc3x0123 = _mm_add_epi32(vacc3x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
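      // The 1-byte tail uses _mm_extract_epi8 (SSE4.1); the SSE2 kernels
      // above read the same bytes through _mm_extract_epi16/_mm_cvtsi128_si32.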
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,841 | 41.905797 | 108 | c | XNNPACK | XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
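      // WAsm SIMD: wasm_u16x8_load8x8 widens 8 bytes to 16-bit lanes on load,
      // and wasm_i32x4_dot_i16x8 takes the role of _mm_madd_epi16.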
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
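      // kc was rounded up to a multiple of 2, so a remainder of 2, 4, or 6
      // bytes per row may be left; it is handled below in 2-byte (c2) steps.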
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
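    // Requantize with the magic-bias trick: after scaling in fp32, adding a
    // large power-of-two bias leaves the rounded integer in the low mantissa
    // bits, so an integer max (lower clamp) and an integer subtract (removing
    // the bias and adding the output zero point) complete the conversion
    // without an explicit float-to-int instruction.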
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
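    // Store 4 output bytes per row; partial tiles (nc < 4) fall through to the
    // 2- and 1-byte lane stores below.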
if (nc >= 4) {
wasm_v128_store32_lane(c3, vout, 3);
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c3, vout, 6);
c3 += 2;
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c3, vout, 12);
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,063 | avg_line_length: 39.826568 | max_line_length: 134 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
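      // ld64 variant: identical arithmetic to the ld128 kernel above, but
      // weights are fetched 8 bytes per load instead of 16.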
while (k >= 8 * sizeof(uint8_t)) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
const v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(uint8_t)) {
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(uint8_t)) {
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
w = (const void*) ((const uint8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc2x0123 = wasm_i32x4_add(vacc2x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
vacc3x0123 = wasm_i32x4_add(vacc3x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c3, vout, 3);
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c3, vout, 6);
c3 += 2;
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c3, vout, 12);
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 10,961 | avg_line_length: 39.750929 | max_line_length: 134 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
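      // XOP's _mm_maddd_epi16 fuses the pairwise 16-bit multiply-add with the
      // 32-bit accumulation, replacing a separate _mm_madd_epi16/_mm_add_epi32
      // pair per step.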
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
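    // Requantize in fp32: scale, clamp the upper bound while still in float,
    // and let _mm_cvtps_epi32 round to nearest; the output zero point is added
    // during the saturating 16-bit pack, and the lower bound is enforced last
    // with an unsigned byte max.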
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,435 | avg_line_length: 39.989247 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
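      // ld64 variant of the XOP kernel: weights arrive 8 bytes per load, one
      // 2-element (c2) weight block at a time.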
while (k >= 8 * sizeof(uint8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 = (const uint8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 = (const uint8_t*) ((uintptr_t) a1 + k);
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
const __m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 = (const uint8_t*) ((uintptr_t) a2 + k);
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
const __m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 = (const uint8_t*) ((uintptr_t) a3 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);
if (k > 2 * sizeof(uint8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);
if (k > 4 * sizeof(uint8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const uint8_t*) w + 8);
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc2x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
vacc3x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
}
}
}
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 11,528 | avg_line_length: 40.175 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
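  // c2s4 kernels round kc up to a multiple of 8, so the inner K loop needs no
  // remainder path (the XNN_OOB_READS annotation covers any tail overread).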
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
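      // c2s4 layout: rather than broadcasting one 2-element group per multiply,
      // each activation vector is rotated by 32 bits (shuffle 0,3,2,1) between
      // multiplies so every weight block meets the next group of activations.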
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 8,582 | avg_line_length: 39.107477 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
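      // Same c2s4 rotation scheme as the ld128 kernel, with 8-byte weight loads.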
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 8,675 | avg_line_length: 39.353488 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
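    // Store: with a full tile (nc >= 4) each row writes 4 bytes via an unaligned
    // 32-bit store; otherwise the nc & 2 / nc & 1 branches peel off 2- and 1-byte
    // tails, shifting vout right by 16 bits between them to expose the next lanes.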
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3))));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi16(vout, 6);
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 8,734
| 39.817757
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
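      // Weights are zero-extended to int16 and re-centered by subtracting the
      // kernel zero point, so _mm_madd_epi16 sees signed values. Between the four
      // weight slices each activation register is rotated by one 32-bit lane
      // (_MM_SHUFFLE(0, 3, 2, 1)) -- the "s4" shift that lets every slice pair a
      // different K-chunk with the same 4 output columns.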
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_unpacklo_epi8(va2, vzero);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_unpacklo_epi8(va3, vzero);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
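    // Requantization epilogue; _mm_cvtps_epi32 rounds to nearest-even under the
    // default MXCSR rounding mode, matching the fp32 requantization scheme.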
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3))));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi16(vout, 6);
*c2 = (uint8_t) _mm_extract_epi16(vout, 4);
*c1 = (uint8_t) _mm_extract_epi16(vout, 2);
*c0 = (uint8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 8,904
| 40.226852
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
const __m128i vzero = _mm_setzero_si128();
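      // SSE4.1 variant: activations are widened with _mm_cvtepu8_epi16 (pmovzxbw)
      // instead of an unpack against zero; weights, loaded 16 bytes at a time
      // (ld128), still use unpacklo/unpackhi so one load yields two weight slices.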
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb01, vzero), vb_zero_point);
const __m128i vxb1 = _mm_sub_epi16(_mm_unpackhi_epi8(vb01, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb23, vzero), vb_zero_point);
const __m128i vxb3 = _mm_sub_epi16(_mm_unpackhi_epi8(vb23, vzero), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
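    // Epilogue matches the SSE2 path except that SSE4.1 lane extraction
    // (_mm_extract_epi32 / _mm_extract_epi8) replaces the shuffle-and-movd and
    // 16-bit-extract store sequences.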
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,584
| 39.116822
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
__m128i vacc2x0123 = vacc0x0123;
__m128i vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
size_t k = kc;
const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.kernel_zero_point);
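      // ld64 SSE4.1 variant: both activations and weights go through
      // _mm_cvtepu8_epi16, so no all-zeros register is needed for unpacking.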
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepu8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepu8_epi16(va1);
a1 += 8;
const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
__m128i vxa2 = _mm_cvtepu8_epi16(va2);
a2 += 8;
const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
__m128i vxa3 = _mm_cvtepu8_epi16(va3);
a3 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb0), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 8));
const __m128i vxb1 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb1), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 16));
const __m128i vxb2 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb2), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const uint8_t*) w + 24));
const __m128i vxb3 = _mm_sub_epi16(_mm_cvtepu8_epi16(vb3), vb_zero_point);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));
w = (const void*) ((const uint8_t*) w + 32);
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
__m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
__m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);
const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);
__m128i vout = _mm_packus_epi16(vacc01x0123, vacc23x0123);
vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->fp32_sse2.output_min));
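    // Full-tile stores advance each row pointer by cn_stride and rewind the
    // indirection buffer a by ks, so the same A pointers are reused for the next
    // 4-column tile; partial tiles finish the row and set nc = 0.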
if (nc >= 4) {
unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
c3 += 2;
unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
c2 += 2;
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c3 = (uint8_t) _mm_extract_epi8(vout, 12);
*c2 = (uint8_t) _mm_extract_epi8(vout, 8);
*c1 = (uint8_t) _mm_extract_epi8(vout, 4);
*c0 = (uint8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,677
| 39.362791
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
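        // wasm_i32x4_dot_i16x8 is the WAsm analogue of SSE2's pmaddwd: it
        // multiplies corresponding int16 lanes and adds adjacent products into
        // four int32 lanes, giving one 2-element dot product per output column.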
size_t k = kc;
do {
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb01), vb_zero_point);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb01), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const uint8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb23), vb_zero_point);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb23), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
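    // Magic-bias requantization: adding magic_bias (a large power-of-two float)
    // leaves the rounded integer result in the low mantissa bits. Reinterpreted
    // as int32, the max against magic_min applies the lower output bound, and
    // subtracting magic_bias_less_output_zero_point strips the bias while adding
    // the output zero point in one step.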
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c3, vout, 3);
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c3, vout, 6);
c3 += 2;
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c3, vout, 12);
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,367
| 37.562212
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qu8-igemm/gen/qu8-igemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qu8_igemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const uint8_t** restrict a,
const void* restrict w,
uint8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const uint8_t* zero,
const union xnn_qu8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 4);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (4 * sizeof(void*)) == 0);
assert(a_offset % sizeof(uint8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
uint8_t* c0 = c;
uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr < 2) {
c1 = c0;
}
uint8_t* c2 = (uint8_t*) ((uintptr_t) c1 + cm_stride);
if XNN_UNPREDICTABLE(mr <= 2) {
c2 = c1;
}
uint8_t* c3 = (uint8_t*) ((uintptr_t) c2 + cm_stride);
if XNN_UNPREDICTABLE(mr != 4) {
c3 = c2;
}
kc = round_up_po2(kc, 8 * sizeof(uint8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
v128_t vacc2x0123 = vacc0x0123;
v128_t vacc3x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const uint8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
}
const uint8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
}
const uint8_t* restrict a2 = a[2];
if XNN_UNPREDICTABLE(a2 != zero) {
a2 = (const uint8_t*) ((uintptr_t) a2 + a_offset);
}
const uint8_t* restrict a3 = a[3];
if XNN_UNPREDICTABLE(a3 != zero) {
a3 = (const uint8_t*) ((uintptr_t) a3 + a_offset);
}
a += 4;
const v128_t vb_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.kernel_zero_point);
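      // ld64 variant: wasm_u16x8_load8x8 loads 8 bytes and zero-extends them to
      // eight uint16 lanes, one widening load per 8-byte weight slice instead of
      // one 16-byte load per two slices.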
size_t k = kc;
do {
v128_t vxa0 = wasm_u16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_u16x8_load8x8(a1);
a1 += 8;
v128_t vxa2 = wasm_u16x8_load8x8(a2);
a2 += 8;
v128_t vxa3 = wasm_u16x8_load8x8(a3);
a3 += 8;
const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 8), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 16), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_sub(wasm_u16x8_load8x8((const uint8_t*) w + 24), vb_zero_point);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));
w = (const uint8_t*) w + 32;
k -= 8 * sizeof(uint8_t);
} while (k != 0);
p -= 4 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);
const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);
v128_t vout = wasm_u8x16_narrow_i16x8(vacc01x0123, vacc23x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_u8x16_min(vout, voutput_max);
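    // Only the upper bound is applied after narrowing: the lower bound was
    // already enforced by the magic_min clamp while the accumulators were still
    // in the biased integer representation.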
if (nc >= 4) {
wasm_v128_store32_lane(c3, vout, 3);
wasm_v128_store32_lane(c2, vout, 2);
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c3 = (uint8_t*) ((uintptr_t) c3 + cn_stride);
c2 = (uint8_t*) ((uintptr_t) c2 + cn_stride);
c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
a = (const uint8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c3, vout, 6);
c3 += 2;
wasm_v128_store16_lane(c2, vout, 4);
c2 += 2;
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c3, vout, 12);
wasm_v128_store8_lane(c2, vout, 8);
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,265
| 37.446512
| 134
|
c
|