| repo (string, 1-152 chars, nullable) | file (string, 14-221 chars) | code (string, 501-25k chars) | file_length (int64, 501-25k) | avg_line_length (float64, 20-99.5) | max_line_length (int64, 21-134) | extension_type (string, 2 classes) |
|---|---|---|---|---|---|---|
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,274 | 31.96875 | 108 | c |
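A minimal standalone sketch (not XNNPACK code; assumes an SSE4.1 toolchain) of the sign-extension idiom the kernel above applies to the 16 weight bytes in vb01: _mm_cvtepi8_epi16 widens bytes 0..7, while the unpackhi+srai pair widens bytes 8..15 by duplicating each byte into a 16-bit lane and arithmetically shifting the sign bits down.

// Standalone check of the int8 -> int16 widening idioms used above.
#include <assert.h>
#include <smmintrin.h>
#include <stdint.h>

int main(void) {
  int8_t bytes[16];
  for (int i = 0; i < 16; i++) bytes[i] = (int8_t) (i * 17 - 128);
  const __m128i v = _mm_loadu_si128((const __m128i*) bytes);
  const __m128i lo = _mm_cvtepi8_epi16(v);                        // bytes 0..7
  const __m128i hi = _mm_srai_epi16(_mm_unpackhi_epi8(v, v), 8);  // bytes 8..15
  int16_t lanes[8];
  _mm_storeu_si128((__m128i*) lanes, lo);
  for (int i = 0; i < 8; i++) assert(lanes[i] == bytes[i]);
  _mm_storeu_si128((__m128i*) lanes, hi);
  for (int i = 0; i < 8; i++) assert(lanes[i] == bytes[i + 8]);
  return 0;
}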
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,392 | 32.290123 | 108 | c |
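A scalar cross-check (illustrative, not library code) of the core accumulation step shared by these c2 kernels: each _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(n, n, n, n)), vxbN) broadcasts one 2-element input pair and forms four 2-element dot products against four weight pairs, one per output channel.

// Verifies one madd step against its scalar definition.
#include <assert.h>
#include <emmintrin.h>
#include <stdint.h>

int main(void) {
  const int16_t a[2] = {3, -5};                    // one broadcast input pair
  const int16_t b[8] = {1, 2, 3, 4, 5, 6, 7, 8};   // four weight pairs
  const __m128i va = _mm_setr_epi16(a[0], a[1], a[0], a[1], a[0], a[1], a[0], a[1]);
  const __m128i vb = _mm_loadu_si128((const __m128i*) b);
  int32_t out[4];
  _mm_storeu_si128((__m128i*) out, _mm_madd_epi16(va, vb));
  for (int n = 0; n < 4; n++) {
    assert(out[n] == a[0] * b[2 * n] + a[1] * b[2 * n + 1]);
  }
  return 0;
}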
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,577 | 33.220859 | 108 | c |
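SSE2 has no _mm_cvtepi8_epi16, so the kernel above widens the int8 weights with a compare-generated sign mask (a single _mm_cmpgt_epi8 serves both the low and the high unpack) and widens the input with the self-unpack+srai trick. A standalone sketch of the mask idiom, not XNNPACK code:

// SSE2 int8 -> int16 widening via a sign mask built with a compare.
#include <assert.h>
#include <emmintrin.h>
#include <stdint.h>

int main(void) {
  int8_t bytes[16];
  for (int i = 0; i < 16; i++) bytes[i] = (int8_t) (113 * i);
  const __m128i v = _mm_loadu_si128((const __m128i*) bytes);
  const __m128i vsign = _mm_cmpgt_epi8(_mm_setzero_si128(), v);  // 0xFF where negative
  const __m128i lo = _mm_unpacklo_epi8(v, vsign);  // bytes 0..7 as int16
  const __m128i hi = _mm_unpackhi_epi8(v, vsign);  // bytes 8..15 as int16
  int16_t lanes[8];
  _mm_storeu_si128((__m128i*) lanes, lo);
  for (int i = 0; i < 8; i++) assert(lanes[i] == bytes[i]);
  _mm_storeu_si128((__m128i*) lanes, hi);
  for (int i = 0; i < 8; i++) assert(lanes[i] == bytes[i + 8]);
  return 0;
}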
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 5,667 | 33.773006 | 108 | c |
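One epilogue difference from the SSE4.1 variants is visible above: SSE2 lacks _mm_max_epi8, so the output-min clamp runs at int16 precision before the final pack rather than at int8 after it. A small check (assumes an SSE4.1 toolchain so both orders can be compared) that the two orders agree whenever the bound fits in int8:

// Clamp-then-pack (SSE2 order) vs pack-then-clamp (SSE4.1 order).
#include <assert.h>
#include <smmintrin.h>
#include <stdint.h>

int main(void) {
  const __m128i v = _mm_setr_epi16(-300, -5, 0, 42, 127, 300, -128, 7);
  const __m128i vmin16 = _mm_set1_epi16(-20);
  const __m128i vmin8 = _mm_set1_epi8(-20);
  const __m128i r1 = _mm_packs_epi16(_mm_max_epi16(v, vmin16), v);  // SSE2 order
  const __m128i r2 = _mm_max_epi8(_mm_packs_epi16(v, v), vmin8);    // SSE4.1 order
  int8_t a[16], b[16];
  _mm_storeu_si128((__m128i*) a, r1);
  _mm_storeu_si128((__m128i*) b, r2);
  for (int i = 0; i < 8; i++) assert(a[i] == b[i]);  // low 8 bytes carry the result
  return 0;
}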
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,276 | 31.98125 | 108 | c |
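The epilogue after the accumulation loop is XNNPACK's fp32 requantization. A scalar reference sketch (the helper name and layout are mine, for illustration only): multiply the int32 accumulator by the per-channel float scale, clamp the top end in float with output_max folded against the zero point, round to nearest, add the zero point, and clamp the bottom end after narrowing.

// Scalar model of the vector epilogue: scale, clamp-high, round,
// add zero point, clamp-low.
#include <assert.h>
#include <math.h>
#include <stdint.h>

static int8_t requantize_fp32(int32_t acc, float scale, int16_t zero_point,
                              int8_t out_min, int8_t out_max) {
  float scaled = (float) acc * scale;
  const float max_less_zp = (float) ((int32_t) out_max - zero_point);
  if (scaled > max_less_zp) scaled = max_less_zp;   // _mm_min_ps
  long rounded = lrintf(scaled) + zero_point;       // _mm_cvtps_epi32 + adds
  if (rounded < out_min) rounded = out_min;         // _mm_max_epi8
  return (int8_t) rounded;
}

int main(void) {
  assert(requantize_fp32(1000, 0.25f, 3, -128, 127) == 127);  // clamped high
  assert(requantize_fp32(-100, 0.1f, 5, -2, 127) == -2);      // clamped low
  assert(requantize_fp32(42, 0.5f, 1, -128, 127) == 22);      // 21 + 1
  return 0;
}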
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,394 | 32.302469 | 108 | c |
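Because kc is rounded up to a multiple of 2, the remainder handled by the if (k != 0) block above can only be 2, 4, or 6 bytes, which is why the nested k > 2 and k > 4 tests issue at most three tail madd steps. A trivial model of that ladder (hypothetical helper, for illustration):

// Counts the tail steps the remainder ladder executes.
#include <assert.h>
#include <stddef.h>

static size_t tail_steps(size_t k) {
  // k: remaining bytes, non-zero, < 8, multiple of 2.
  size_t steps = 1;        // vxb0 step always runs
  if (k > 2) steps += 1;   // vxb1 step
  if (k > 4) steps += 1;   // vxb2 step
  return steps;
}

int main(void) {
  assert(tail_steps(2) == 1);
  assert(tail_steps(4) == 2);
  assert(tail_steps(6) == 3);
  return 0;
}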
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,982 | 30.339623 | 134 | c |
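The WAsm SIMD port maps one-to-one onto the x86 template because wasm_i32x4_dot_i16x8 computes the same paired int16 dot product as _mm_madd_epi16. A standalone sketch (illustrative; compile with a wasm toolchain such as clang --target=wasm32 -msimd128):

// Checks wasm_i32x4_dot_i16x8 against its scalar definition.
#include <assert.h>
#include <stdint.h>
#include <wasm_simd128.h>

int main(void) {
  const int16_t a[8] = {1, -2, 3, -4, 5, -6, 7, -8};
  const int16_t b[8] = {9, 8, 7, 6, 5, 4, 3, 2};
  const v128_t vd = wasm_i32x4_dot_i16x8(wasm_v128_load(a), wasm_v128_load(b));
  int32_t out[4];
  wasm_v128_store(out, vd);
  for (int i = 0; i < 4; i++) {
    assert(out[i] == a[2 * i] * b[2 * i] + a[2 * i + 1] * b[2 * i + 1]);
  }
  return 0;
}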
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,878 | 30.076433 | 134 | c |
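Instead of a float min/cvt epilogue, the wasmsimd kernels round with the magic-bias trick: adding a large constant makes the float's mantissa hold the round-to-nearest-even integer, after which clamping and the zero-point offset happen in the integer domain. A scalar sketch of the core idea (the bias value here is the 0x1.8p23f constant commonly used for this trick; the kernels take their exact constants from the fp32_wasmsimd params):

// Magic-bias float -> int32 round-to-nearest-even, done with a bit cast.
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int32_t magic_round(float x) {
  const float magic_bias = 0x1.8p23f;       // 12582912.0f
  const float biased = x + magic_bias;      // mantissa now holds rne(x)
  int32_t bits, magic_bits;
  memcpy(&bits, &biased, sizeof bits);      // reinterpret, like reusing vacc as i32x4
  memcpy(&magic_bits, &magic_bias, sizeof magic_bits);
  return bits - magic_bits;
}

int main(void) {
  assert(magic_round(2.4f) == 2);
  assert(magic_round(2.5f) == 2);   // ties round to even
  assert(magic_round(3.5f) == 4);
  assert(magic_round(-7.6f) == -8);
  return 0;
}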
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,236 | 30.932927 | 108 | c |
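The XOP variant folds each add+madd pair of the SSE kernels into a single _mm_maddd_epi16, one fused multiply-add-accumulate per K-step. A sketch of the equivalence (requires an XOP-capable AMD CPU and -mxop; guarded so it still builds elsewhere):

// _mm_maddd_epi16(a, b, acc) == _mm_add_epi32(acc, _mm_madd_epi16(a, b))
// in the absence of overflow.
#ifdef __XOP__
#include <assert.h>
#include <stdint.h>
#include <x86intrin.h>

int main(void) {
  const __m128i a = _mm_setr_epi16(1, -2, 3, -4, 5, -6, 7, -8);
  const __m128i b = _mm_setr_epi16(9, 8, 7, 6, 5, 4, 3, 2);
  const __m128i acc = _mm_setr_epi32(100, 200, 300, 400);
  int32_t x[4], y[4];
  _mm_storeu_si128((__m128i*) x, _mm_maddd_epi16(a, b, acc));
  _mm_storeu_si128((__m128i*) y, _mm_add_epi32(acc, _mm_madd_epi16(a, b)));
  for (int i = 0; i < 4; i++) assert(x[i] == y[i]);
  return 0;
}
#else
int main(void) { return 0; }  // XOP not available at compile time
#endif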
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,354 | 31.259036 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,007 | 30.069767 | 108 | c |
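The c2s4 kernels (note that kc now rounds up to 8 rather than 2, so there is no remainder ladder) drop the per-step broadcast shuffles of the c2 kernels: the input register is rotated one 32-bit lane between madd steps with _MM_SHUFFLE(0, 3, 2, 1), so a different K-pair lines up with each weight block in turn. A minimal sketch of that rotation, not XNNPACK code:

// _MM_SHUFFLE(0, 3, 2, 1) rotates the four 32-bit lanes down by one.
#include <assert.h>
#include <emmintrin.h>
#include <stdint.h>

int main(void) {
  const __m128i v = _mm_setr_epi32(10, 20, 30, 40);
  const __m128i r = _mm_shuffle_epi32(v, _MM_SHUFFLE(0, 3, 2, 1));
  int32_t lanes[4];
  _mm_storeu_si128((__m128i*) lanes, r);
  assert(lanes[0] == 20 && lanes[1] == 30 && lanes[2] == 40 && lanes[3] == 10);
  return 0;
}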
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 4,125 | 30.496183 | 108 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
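    // SSE2 has no PMAXSB, so the output_min clamp is applied on int16 values
    // before the final pack to int8.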
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-sse2-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
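      // ld64 variant: each 8-byte weight slice is loaded with MOVQ and
      // sign-extended in-register, instead of one 16-byte load per two slices.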
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-sse41-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
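        // PMOVSXBW sign-extends only the low 8 bytes of the 16-byte weight
        // load; the high half is widened with the unpack-high + arithmetic
        // shift trick instead.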
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-sse41-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
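    // Store 4 output channels per tile; a partial tile (nc < 4) is written as
    // 2 bytes then 1 byte, shifting the packed result down between stores.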
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
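      // wasm_i32x4_dot_i16x8 is the WAsm counterpart of PMADDWD: it multiplies
      // int16 lanes and adds adjacent pairs into int32.  The shuffle indices
      // (1, 2, 3, 4) rotate the input vector down by one 32-bit lane between
      // steps.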
do {
v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
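    // Magic-bias requantization: adding the bias fixes the float exponent so
    // the rounded result appears in the low mantissa bits; the clamp and the
    // zero-point addition are then performed with integer ops before
    // narrowing.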
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
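      // ld64 variant: wasm_i16x8_load8x8 loads 8 signed bytes and widens them
      // to int16 in a single operation, so no separate extend step is needed.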
do {
v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
v128_t vacc00x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc0x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc00x0123, vacc00x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c0, vout, 0);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-xop-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
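      // XOP's _mm_maddd_epi16 fuses the PMADDWD multiply-add with the
      // accumulator add, saving one PADDD per step relative to the SSE4.1
      // kernels.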
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c2s4-minmax-fp32-xop-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
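  // kc is rounded up so the inner loop can always consume full 8-byte slices;
  // the kernel is declared XNN_OOB_READS, so reading past the true kc is
  // permitted.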
int8_t* c0 = c;
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 1 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-avx-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
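    // c8 layout: one int32x4 accumulator per output channel, each seeded with
    // that channel's bias; the four are reduced to a single vector after the
    // loops.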
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
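
    // Reduce the four per-channel accumulators with PHADDD: two rounds of
    // horizontal add yield [sum0, sum1, sum2, sum3].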
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-avx-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-sse2-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
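
    // SSE2 has no PHADDD; the four per-channel accumulators are reduced with
    // an unpack/add transpose tree instead.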
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-sse2-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc00x0123 = _mm_max_epi16(vacc00x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-sse41-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-sse41-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-xop-ld128.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}

XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x4c8-minmax-fp32-xop-ld64.c

// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
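        // XOP multiply-accumulate: signed 16-bit lanes are multiplied,
        // adjacent products summed pairwise into 32-bit lanes, and the
        // accumulator added, all in one instruction.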
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc00x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc0x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc00x0123, vacc00x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 4,258 | avg_line_length: 29.421429 | max_line_length: 108 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-neon-mlal-lane-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
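      // Main loop: 8 input channels per iteration. Each vmlal_lane_s16
      // multiplies the weights of all 8 output columns by one broadcast
      // input element and accumulates into two int32x4 halves.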
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
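        // Prefetch weights ~448 bytes ahead to hide memory latency.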
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
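      // Remainder: kc is not a multiple of 8. The 8-byte input load may
      // read past the k valid bytes (the kernel is marked XNN_OOB_READS);
      // only the k valid channels are multiplied into the accumulators.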
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
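    // Round via the magic-bias trick: adding a large float constant leaves
    // the rounded integer in the low mantissa bits; the saturating subtract
    // removes the bias and folds in the output zero point.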
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 10,153 | avg_line_length: 43.147826 | max_line_length: 125 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-neon-mlal-lane.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 10,065 | avg_line_length: 43.149123 | max_line_length: 125 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-neonv8-mlal-lane-prfm.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__neonv8_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
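    // ARMv8 provides a native round-to-nearest-even conversion
    // (vcvtnq_s32_f32), so no magic-bias trick is needed; the zero point
    // is added after narrowing to 16 bits, with saturation.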
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 10,009 | avg_line_length: 42.71179 | max_line_length: 114 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8-minmax-fp32-neonv8-mlal-lane.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8__neonv8_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
}
}
}
}
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 9,921 | avg_line_length: 42.709251 | max_line_length: 114 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
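      // c2 layout: weights are packed two input channels at a time. Each
      // vdup_lane_s16 broadcasts one pair of input bytes; vmull/vmlal_s8
      // form 16-bit products and vpadalq_s16 folds adjacent pairs into the
      // int32 accumulators. The x0/x1 copies unroll the loop 2x.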
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
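      // Single (non-unrolled) pass over a remaining block of 8 input channels.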
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
    vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
    vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
    const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
    const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 12,329 | avg_line_length: 47.164063 | max_line_length: 125 | extension_type: c |

| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
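      // ld1r variant of the c2 kernel: each 2-channel input pair is loaded
      // directly with vld1_dup_s16 instead of loading 8 bytes and
      // duplicating lanes; the arithmetic matches the dup variant above.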
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
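// Requantization (fp32 path): convert the int32 accumulators to float and
// apply the per-channel scales packed after the weights. Rounding uses the
// magic-bias trick: adding a large bias places the rounded integer in the low
// mantissa bits, so reinterpreting the float as int32 and subtracting
// (magic_bias - output_zero_point) yields the zero-point-adjusted result,
// with vqsubq_s32 saturating the subtraction.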
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
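// Store: a full 8-column tile is written with a single vst1_s8. For the
// remainder, 4/2/1 bytes are stored through reinterpreted u32/u16/s8 lanes,
// rotating the already-stored bytes out of the vector with vext_s8.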
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12558
| 46.392453
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neon-mlal-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
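// IGEMM consumes an indirection buffer: a[] supplies one input-row pointer
// per kernel tap. Pointers equal to `zero` reference a zero vector used for
// implicit padding and are deliberately not rebased by a_offset.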
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
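// Main loop, 16 k-bytes per iteration. Each vld2_dup_s16 reads two adjacent
// 16-bit elements (two (k, k+1) input pairs) and broadcasts each across a
// 4-lane vector. The x0/x1 halves double-pump the multiply: vmull_s8 starts
// the product, vmlal_s8 folds in the second 8 bytes, and one vpadalq_s16
// widens the pair sums into the int32 accumulators.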
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
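// 8-byte tail: same broadcast pattern, but single-pumped, so each k-pair
// contributes one vmull_s8 product with no vmlal stage.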
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
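// Remainder: k is 2, 4, or 6 here because kc was rounded up to a multiple of
// 2. The 8-byte vld1_s8 may over-read (permitted by XNN_OOB_READS); only the
// valid k-pairs, broadcast via vdup_lane_s16, are multiplied in.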
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12238
| 46.254826
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neon-mlal-ld4r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neon_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
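// ld4r variant: a single vld4_dup_s16 de-interleaves four consecutive 16-bit
// elements, broadcasting all four k-pairs of an 8-byte input group in one
// load instead of separate per-pair loads.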
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12009
| 45.914063
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neonv8-mlal-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
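// dup variant: the input is read with a plain vld1_s8 and each 16-bit k-pair
// is broadcast on demand with vdup_lane_s16, trading structured loads for
// in-register duplication.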
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
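// Requantization (ARMv8 path): instead of the magic-bias trick,
// vcvtnq_s32_f32 rounds the scaled floats to nearest (ties to even)
// directly; the output zero point is then added in saturating int16
// arithmetic before narrowing to int8.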
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12185
| 46.788235
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neonv8-mlal-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
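// ld1r variant: four separate vld1_dup_s16 loads broadcast one 16-bit k-pair
// each, covering the 8-byte input group at 2-byte granularity.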
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12414
| 46.026515
| 105
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neonv8-mlal-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
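// Same de-interleaving vld2_dup_s16 input broadcast as the NEON ld2r kernel
// above, combined here with the ARMv8 rounding conversion in the epilogue.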
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
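// Store 8 outputs per row; the remainder path writes 4, 2 and 1 lanes at a time.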
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 12,094 | 45.879845 | 105 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2-minmax-fp32-neonv8-mlal-ld4r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2__neonv8_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
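// Main loop: 16 K bytes per iteration; VLD4 dup-loads replicate each 16-bit pair of K values, and MULL+MLAL double-pumps two 8-byte blocks per accumulator update.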
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
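// 8 to 15 remaining K bytes: one single-pass (MULL only) 8-byte block.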
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 8 * sizeof(int8_t);
}
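// Final 2, 4 or 6 K bytes; the 8-byte load may read past the end (the kernel is annotated XNN_OOB_READS).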
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
}
}
}
p -= 1 * sizeof(void*);
} while (p != 0);
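// Requantize: per-channel float scaling followed by FCVTNS round-to-nearest (ARMv8).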
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,865 | 45.533333 | 105 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2s4-minmax-fp32-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2s4__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
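// Main loop: 16 K bytes; the c2s4 shuffle variant rotates the activation vector by 2 bytes (VEXT) between column groups instead of duplicating lanes.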
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
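// kc is rounded up to 8 in this variant, so any remainder is exactly one 8-byte block.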
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
p -= 1 * sizeof(void*);
} while (p != 0);
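// Requantize with the magic-bias trick: scale in float, add a large rounding bias, then subtract (bias - zero point) in the integer domain.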
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,707 | 40.665072 | 125 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c2s4-minmax-fp32-neonv8-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c2s4__neonv8_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
va0x1 = vext_s8(va0x1, va0x1, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
va0x0 = vext_s8(va0x0, va0x0, 2);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
}
p -= 1 * sizeof(void*);
} while (p != 0);
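// Same shuffle kernel, but the ARMv8 variant requantizes with FCVTNS instead of the magic bias.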
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,563 | 40.173077 | 105 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neon-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
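// Main loop: 16 K bytes; each 32-bit half of the activation vector is broadcast with VDUP for the c4 weight layout.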
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
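// Pairwise-add the 2-column accumulators into 4-column vectors before requantization.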
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,589 | 46.306122 | 128 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neon-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
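// Same c4 kernel, but activations are broadcast straight from memory with LD1R (load-replicate) instead of VDUP.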
while (k >= 16 * sizeof(int8_t)) {
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,630 | 45.899194 | 128 | c |
| XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neon-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
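// LD2R variant: one de-interleaving load-replicate fetches both 32-bit K groups at once.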
while (k >= 16 * sizeof(int8_t)) {
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
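    // Saturating narrow: int32 -> int16 -> int8.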
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,461 | 45.783673 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neondot.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neondot.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neondot(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
// Inner accumulation loop along the 8 columns.
size_t k = kc;
// 2x partial unrolled loop to load 8 bytes at a time.
while (k >= 8 * sizeof(int8_t)) {
// Load a 1x8 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
// Load a 8x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x8 * 8x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
k -= 8 * sizeof(int8_t);
}
// Handle up to 4 final positions of `k`
if XNN_UNLIKELY(k != 0) {
// Load a 1x4 block of activations.
const int8x8_t va0x01234567 = vld1_s8(a0);
// Load a 4x8 block of weights.
const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
// Multiply-accumulate: 1x4 * 4x8 --> 1x8.
vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
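    // Requantize: scale in fp32 with per-channel weight scales, then vcvtnq rounds to nearest-even back to int32.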
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const float*) w + 4;
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,104 | 33.261745 | 130 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neonv8-mlal-dup.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
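      // 2x unrolled main loop: widen with SMULL, fold the second 8-byte group in with SMLAL.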
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
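    // Fold the two partial sums held per output channel into a single int32 total.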
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,445 | 45.909836 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neonv8-mlal-ld1r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
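      // ld1r variant: each 4-byte activation group is broadcast straight from memory with vld1_dup_s32.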
while (k >= 16 * sizeof(int8_t)) {
const int32x2_t va00x0 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int32x2_t va00x1 = vld1_dup_s32((const void*)a0);
const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va00x1);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2_t va00 = vld1_dup_s32((const void*)a0);
const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va00);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va01);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,486 | 45.506073 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4-minmax-fp32-neonv8-mlal-ld2r.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4__neonv8_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
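      // ld2r variant: vld2_dup_s32 broadcast-loads both 4-byte groups of an 8-byte block in one instruction.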
while (k >= 16 * sizeof(int8_t)) {
const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 11,317 | 45.385246 | 128 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4s2-minmax-fp32-neon-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4s2__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
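        // Rotate the activations left by 4 bytes so the second 4-byte group lines up with the c1 weights.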
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
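    // Magic-bias rounding: adding the large bias plants the rounded integer in the float's low mantissa bits;
    // the saturating subtract below removes the bias and folds in the output zero point.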
const float32x4_t vmagic_bias = vld1q_dup_f32(¶ms->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(¶ms->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 9,036 | 40.645161 | 125 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c4s2-minmax-fp32-neonv8-mlal.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c4s2__neonv8_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
va0x1 = vext_s8(va0x1, va0x1, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
k -= 16 * sizeof(int8_t);
}
if (k != 0) {
int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
va0x0 = vext_s8(va0x0, va0x0, 4);
int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
#else
const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32(w); w = (const float*) w + 4;
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32(w); w = (const float*) w + 4;
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
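    // ARMv8 vcvtnq rounds to nearest-even directly, so no magic-bias trick is needed here.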
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(¶ms->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(¶ms->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(¶ms->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 8,892 | 40.171296 | 99 | c |
XNNPACK | XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-avx2.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx8c8-avx2.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <immintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__avx2(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
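    // Seed the accumulators: each 256-bit register carries the bias for one output channel in each 128-bit half.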
const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]);
const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]);
__m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]);
const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]);
__m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
w = (const int32_t*) w + 8;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = 0;
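      // kc was rounded up to a multiple of 8, so the k loop needs no remainder handling.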
while (k < kc) {
const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
const __m256i vxa0 = _mm256_cvtepi8_epi16(va0);
a0 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m256i vxb01 = _mm256_cvtepi8_epi16(vb01);
vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m256i vxb23 = _mm256_cvtepi8_epi16(vb23);
vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
const __m128i vb45 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 32));
const __m256i vxb45 = _mm256_cvtepi8_epi16(vb45);
vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
const __m128i vb67 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 48));
const __m256i vxb67 = _mm256_cvtepi8_epi16(vb67);
vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
w = (const void*) ((const int8_t*) w + 64);
k += 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
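// Horizontal reduction: _mm256_hadd_epi32 adds adjacent pairs within each
// 128-bit lane, so two rounds leave the channel totals in lane order
// [c0, c2, c4, c6 | c1, c3, c5, c7]. The permute below (index vector
// 0,4,1,5,2,6,3,7 read low-to-high) restores the natural order c0..c7.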
const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
__m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
__m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
const __m256 vscale01234567 = _mm256_load_ps(w);
w = (const void*) ((const float*) w + 8);
vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale01234567);
const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
__m256i vacc00x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc0x01234567), voutput_zero_point);
vacc00x01234567 = _mm256_permute4x64_epi64(vacc00x01234567, _MM_SHUFFLE(3, 1, 2, 0));
__m256i vout = _mm256_packs_epi16(vacc00x01234567, vacc00x01234567);
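// output_max was already applied in the float domain above; output_min is
// applied here on int8 data, since _mm256_packs_epi16 saturates to
// [-128, 127] and a single max() then suffices for the lower clamp.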
vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));
__m128i vout_lo = _mm256_castsi256_si128(vout);
__m128i vout_hi = _mm256_extracti128_si256(vout, 1);
if (nc >= 8) {
_mm_storel_epi64((__m128i*) c0, vout_lo);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
_mm_storeu_si32(c0, vout_lo);
c0 += 4;
vout_lo = _mm_srli_epi64(vout_lo, 32);
vout_hi = _mm_srli_epi64(vout_hi, 32);
}
if (nc & 2) {
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));
c0 += 2;
vout_lo = _mm_srli_epi32(vout_lo, 16);
vout_hi = _mm_srli_epi32(vout_hi, 16);
}
if (nc & 1) {
*c0 = (int8_t) _mm_extract_epi8(vout_lo, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,765
| 34.158537
| 120
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-neon-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__neon_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
// 2x partial unrolled loop to load 16 bytes at a time using MLA.
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
k -= 16 * sizeof(int8_t);
}
// Handle 8 bytes at a time using MUL.
if (k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
k -= 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
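// Reduce the eight per-channel int32x4 accumulators to two int32x4 vectors of
// channel totals. AArch64 uses vpaddq_s32 (pairwise add across full 128-bit
// vectors); the AArch32 fallback below builds the same result from 64-bit
// halves with vadd_s32/vpadd_s32.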
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
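// Requantize via the magic-bias trick: adding a large float constant (on the
// order of 2^23) forces round-to-nearest into the mantissa bits, so the
// rounded integer can be recovered by reinterpreting the float bits and
// subtracting magic_bias_less_output_zero_point, which also folds in the
// output zero point. The saturating vqsubq_s32 handles out-of-range values.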
const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neon.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neon.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,892
| 45.551282
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-1x8c8-minmax-fp32-neonv8-mlal.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c8-neon-mull.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_1x8c8__neonv8_mlal(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 1);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (1 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
do {
int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
a += 1;
size_t k = kc;
// 2x partial unrolled loop to load 16 bytes at a time using MLA.
while (k >= 16 * sizeof(int8_t)) {
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0);
vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0);
vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0);
vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0);
vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0);
vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0);
vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0);
vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t));
int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0);
vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
k -= 16 * sizeof(int8_t);
}
// Handle 8 bytes at a time using MUL.
if (k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
k -= 8 * sizeof(int8_t);
}
p -= 1 * sizeof(void*);
} while (p != 0);
#if XNN_ARCH_ARM64
const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
#else
const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
#endif
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
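// With ARMv8 instructions available, vcvtnq_s32_f32 (FCVTNS) rounds directly
// to nearest-even, so this variant skips the magic-bias trick used by the
// plain NEON kernel and adds the output zero point after narrowing instead.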
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
int8x8_t vout0x01234567 = vqmovn_s16(vacc0x01234567);
#endif
const int8x8_t voutput_min = vld1_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567 = vmax_s8(vout0x01234567, voutput_min);
const int8x8_t voutput_max = vld1_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567 = vmin_s8(vout0x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c0 + 0, vout0x01234567);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1_lane_u32((void*) c0, vreinterpret_u32_s8(vout0x01234567), 0); c0 += 4;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 4);
}
if (nc & 2) {
vst1_lane_u16((void*) c0, vreinterpret_u16_s8(vout0x01234567), 0); c0 += 2;
vout0x01234567 = vext_s8(vout0x01234567, vout0x01234567, 2);
}
if (nc & 1) {
vst1_lane_s8(c0, vout0x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 10,748
| 45.133047
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x1c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x1c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc1x0 = vacc0x0;
w = (const void*) ((const int32_t*) w + 1);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
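// Inner loop: __sxtb16 sign-extends bytes 0 and 2 of a 32-bit word into two
// int16 lanes, and __sxtb16(__ror(x, 8)) does the same for bytes 1 and 3.
// __smlad then multiplies the two int16 pairs and accumulates both products
// into the int32 accumulator, yielding a 4-element dot product per iteration.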
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
const int8x4_t va1 = (int8x4_t) unaligned_load_s32(a1); a1 += 4;
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int16x2_t va1c02 = __sxtb16(va1);
const int16x2_t va1c13 = __sxtb16(__ror(va1, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
vacc1x0 = __smlad(va1c02, vb0c02, vacc1x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
k -= 4 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc1x0 = (float) vacc1x0;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
w = (const void*) ((const float*) w + 1);
vfpacc0x0 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout1x0 = __qsub(vout1x0, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
vout1x0 = __ssat(vout1x0, 8);
const uint32_t vout0 = (uint32_t) vout0x0;
const uint32_t vout1 = (uint32_t) vout1x0;
uint32_t vout = (uint32_t) (uint16_t) vout1 | (vout0 << 16);
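// Clamp using ARM SIMD32 flags: __ssub8 performs a per-byte signed subtract
// whose only effect used here is setting the APSR.GE bits, and __sel then
// picks each result byte from its first or second operand based on those
// bits, implementing a per-byte max (against output_min) followed by a
// per-byte min (against output_max).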
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
*c1 = (int8_t) vout;
vout >>= 16;
*c0 = (int8_t) vout;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 1;
} while (nc != 0);
}
| 4,194
| 28.751773
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x2-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const void*) ((const int8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
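// fmagic variant: both clamps are done in the float domain first, so the
// magic-bias addition below receives an already-saturated value and the
// reinterpreted integer needs no further range checks.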
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,608
| 30.786207
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x2-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const void*) ((const int8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
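// imagic variant: unlike fmagic, the clamp happens in the integer domain.
// After the magic-bias addition, the float bit patterns of in-range values
// are monotonically ordered, so integer min/max against the precomputed
// magic_min/magic_max bounds performs the saturation before the zero-point
// adjustment.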
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,391
| 28.28
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x2-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <math.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const void*) ((const int8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
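// lrintf variant: rounding uses the C library's lrintf (round to nearest in
// the current rounding mode, to-nearest-even by default) on the already
// clamped float values; the output zero point is then added in integer form.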
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,477
| 29.882759
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x2-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
w = (const void*) ((const int8_t*) w + 2);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
const float vscale0 = unaligned_indexed_load_f32(w, 0);
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = unaligned_indexed_load_f32(w, 1);
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
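// Same fmagic scheme as the scalar kernel, but the clamps use
// __builtin_wasm_max_f32/__builtin_wasm_min_f32, which lower to the
// WebAssembly f32.max/f32.min instructions.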
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 4,686
| 31.324138
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x2c4-minmax-fp32-armsimd32.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c4-armsimd32.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_acle.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x2c4__armsimd32(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 4 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
const float vmagic_bias = params->fp32_armsimd32.magic_bias;
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
w = (const void*) ((const int32_t*) w + 2);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int8x4_t va0 = (int8x4_t) unaligned_load_s32(a0); a0 += 4;
const int8x4_t va1 = (int8x4_t) unaligned_load_s32(a1); a1 += 4;
const int16x2_t va0c02 = __sxtb16(va0);
const int16x2_t va0c13 = __sxtb16(__ror(va0, 8));
const int16x2_t va1c02 = __sxtb16(va1);
const int16x2_t va1c13 = __sxtb16(__ror(va1, 8));
const int8x4_t vb0 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb0c02 = __sxtb16(vb0);
vacc0x0 = __smlad(va0c02, vb0c02, vacc0x0);
vacc1x0 = __smlad(va1c02, vb0c02, vacc1x0);
const int16x2_t vb0c13 = __sxtb16(__ror(vb0, 8));
vacc0x0 = __smlad(va0c13, vb0c13, vacc0x0);
vacc1x0 = __smlad(va1c13, vb0c13, vacc1x0);
const int8x4_t vb1 = *((const int8x4_t*) w); w = (const int8_t*) w + 4;
const int16x2_t vb1c02 = __sxtb16(vb1);
vacc0x1 = __smlad(va0c02, vb1c02, vacc0x1);
vacc1x1 = __smlad(va1c02, vb1c02, vacc1x1);
const int16x2_t vb1c13 = __sxtb16(__ror(vb1, 8));
vacc0x1 = __smlad(va0c13, vb1c13, vacc0x1);
vacc1x1 = __smlad(va1c13, vb1c13, vacc1x1);
k -= 4 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
w = (const void*) ((const float*) w + 2);
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
const int32_t vmagic_bias_less_zero_point = params->fp32_armsimd32.magic_bias_less_zero_point;
vout0x0 = __qsub(vout0x0, vmagic_bias_less_zero_point);
vout0x1 = __qsub(vout0x1, vmagic_bias_less_zero_point);
vout1x0 = __qsub(vout1x0, vmagic_bias_less_zero_point);
vout1x1 = __qsub(vout1x1, vmagic_bias_less_zero_point);
vout0x0 = __ssat(vout0x0, 8);
vout0x1 = __ssat(vout0x1, 8);
vout1x0 = __ssat(vout1x0, 8);
vout1x1 = __ssat(vout1x1, 8);
const uint32_t vout0 = (uint32_t) (uint8_t) vout0x0 | ((uint32_t) vout0x1 << 8);
const uint32_t vout1 = (uint32_t) (uint8_t) vout1x0 | ((uint32_t) vout1x1 << 8);
uint32_t vout = (uint32_t) (uint16_t) vout1 | (vout0 << 16);
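// vout now holds [row1 col0, row1 col1, row0 col0, row0 col1] from the low
// byte upward, so after the clamp the low half-word is stored to c1 and,
// after a 16-bit shift, the high half-word to c0.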
const int8x4_t voutput_min = (int8x4_t) params->fp32_armsimd32.output_min;
__ssub8((int8x4_t) vout, voutput_min);
vout = (uint32_t) __sel((uint8x4_t) vout, (uint8x4_t) voutput_min);
const int8x4_t voutput_max = (int8x4_t) params->fp32_armsimd32.output_max;
__ssub8((int8x4_t) vout, voutput_max);
vout = (uint32_t) __sel((uint8x4_t) voutput_max, (uint8x4_t) vout);
if XNN_LIKELY(nc >= 2) {
unaligned_store_u16(c1, (uint16_t) vout);
vout >>= 16;
unaligned_store_u16(c0, (uint16_t) vout);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 2;
} else {
*c1 = (int8_t) vout;
vout >>= 16;
*c0 = (int8_t) vout;
nc = 0;
}
} while (nc != 0);
}
| 5,483
| 30.699422
| 98
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4-minmax-fp32-scalar-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4__scalar_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const void*) ((const int8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
vfpacc1x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 6,743
| 33.762887
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4-minmax-fp32-scalar-imagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4__scalar_imagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const void*) ((const int8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
vfpacc1x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);
const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
vout0x0 = math_max_s32(vout0x0, vmagic_min);
vout0x1 = math_max_s32(vout0x1, vmagic_min);
vout0x2 = math_max_s32(vout0x2, vmagic_min);
vout0x3 = math_max_s32(vout0x3, vmagic_min);
vout1x0 = math_max_s32(vout1x0, vmagic_min);
vout1x1 = math_max_s32(vout1x1, vmagic_min);
vout1x2 = math_max_s32(vout1x2, vmagic_min);
vout1x3 = math_max_s32(vout1x3, vmagic_min);
const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
vout0x0 = math_min_s32(vout0x0, vmagic_max);
vout0x1 = math_min_s32(vout0x1, vmagic_max);
vout0x2 = math_min_s32(vout0x2, vmagic_max);
vout0x3 = math_min_s32(vout0x3, vmagic_max);
vout1x0 = math_min_s32(vout1x0, vmagic_max);
vout1x1 = math_min_s32(vout1x1, vmagic_max);
vout1x2 = math_min_s32(vout1x2, vmagic_max);
vout1x3 = math_min_s32(vout1x3, vmagic_max);
const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
vout0x0 -= vmagic_bias_less_zero_point;
vout0x1 -= vmagic_bias_less_zero_point;
vout0x2 -= vmagic_bias_less_zero_point;
vout0x3 -= vmagic_bias_less_zero_point;
vout1x0 -= vmagic_bias_less_zero_point;
vout1x1 -= vmagic_bias_less_zero_point;
vout1x2 -= vmagic_bias_less_zero_point;
vout1x3 -= vmagic_bias_less_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 6,386
| 30.463054
| 102
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4-minmax-fp32-scalar-lrintf.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4__scalar_lrintf(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const void*) ((const int8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
vfpacc1x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
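    // Requantize via lrintf: clamp in the float domain first (with the output
    // zero point pre-subtracted from both bounds), then round to nearest with
    // lrintf and add the zero point back in the integer domain.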
const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 6,564
| 32.840206
| 100
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4-minmax-fp32-wasm-fmagic.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/scalar.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4__wasm_fmagic(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32_t vacc0x0 = ((const int32_t*) w)[0];
int32_t vacc0x1 = ((const int32_t*) w)[1];
int32_t vacc0x2 = ((const int32_t*) w)[2];
int32_t vacc0x3 = ((const int32_t*) w)[3];
int32_t vacc1x0 = vacc0x0;
int32_t vacc1x1 = vacc0x1;
int32_t vacc1x2 = vacc0x2;
int32_t vacc1x3 = vacc0x3;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
assert(a0 != NULL);
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
assert(a1 != NULL);
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const int32_t va0 = (int32_t) *a0++;
const int32_t va1 = (int32_t) *a1++;
const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
w = (const void*) ((const int8_t*) w + 4);
vacc0x0 += va0 * vb0;
vacc0x1 += va0 * vb1;
vacc0x2 += va0 * vb2;
vacc0x3 += va0 * vb3;
vacc1x0 += va1 * vb0;
vacc1x1 += va1 * vb1;
vacc1x2 += va1 * vb2;
vacc1x3 += va1 * vb3;
k -= sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
float vfpacc0x0 = (float) vacc0x0;
float vfpacc0x1 = (float) vacc0x1;
float vfpacc0x2 = (float) vacc0x2;
float vfpacc0x3 = (float) vacc0x3;
float vfpacc1x0 = (float) vacc1x0;
float vfpacc1x1 = (float) vacc1x1;
float vfpacc1x2 = (float) vacc1x2;
float vfpacc1x3 = (float) vacc1x3;
const float vscale0 = ((const float*) w)[0];
vfpacc0x0 *= vscale0;
vfpacc1x0 *= vscale0;
const float vscale1 = ((const float*) w)[1];
vfpacc0x1 *= vscale1;
vfpacc1x1 *= vscale1;
const float vscale2 = ((const float*) w)[2];
vfpacc0x2 *= vscale2;
vfpacc1x2 *= vscale2;
const float vscale3 = ((const float*) w)[3];
vfpacc0x3 *= vscale3;
vfpacc1x3 *= vscale3;
w = (const void*) ((const float*) w + 4);
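    // Requantize with the float "magic bias" trick: clamp in the float domain
    // using the WAsm min/max builtins, add the magic bias to force rounding,
    // then reinterpret the bits as int32 and subtract the combined
    // (bias - zero point) constant to recover the quantized output.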
const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point);
vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point);
const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point);
vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point);
const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
vfpacc0x0 += vmagic_bias;
vfpacc0x1 += vmagic_bias;
vfpacc0x2 += vmagic_bias;
vfpacc0x3 += vmagic_bias;
vfpacc1x0 += vmagic_bias;
vfpacc1x1 += vmagic_bias;
vfpacc1x2 += vmagic_bias;
vfpacc1x3 += vmagic_bias;
const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
if XNN_LIKELY(nc >= 4) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
c1[2] = (int8_t) vout1x2;
c1[3] = (int8_t) vout1x3;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
c0[2] = (int8_t) vout0x2;
c0[3] = (int8_t) vout0x3;
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
c1[0] = (int8_t) vout1x0;
c1[1] = (int8_t) vout1x1;
vout1x0 = vout1x2;
c1 += 2;
c0[0] = (int8_t) vout0x0;
c0[1] = (int8_t) vout0x1;
vout0x0 = vout0x2;
c0 += 2;
}
if (nc & 1) {
c1[0] = (int8_t) vout1x0;
c0[0] = (int8_t) vout0x0;
}
nc = 0;
}
} while (nc != 0);
}
| 6,901
| 34.57732
| 116
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
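      // Main loop: process 8 k-values (4 groups of 2) per iteration. Each
      // _mm_madd_epi16 multiplies a broadcast pair of activations against
      // 4 columns x 2 k-values of weights and accumulates the adjacent
      // 16-bit products into 32-bit lanes.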
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
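    // Requantization: convert to float, apply the per-channel scales, clamp
    // the upper bound in the float domain, round with CVTPS2DQ, then rely on
    // the saturating pack/add for the int8 range and clamp the lower bound
    // last.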
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,292
| 35.833333
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
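        // LD64 variant: each 4x2 group of weights is fetched with a separate
        // 64-bit load and sign-extended, instead of one 128-bit load per pair
        // of groups as in the LD128 variant.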
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,410
| 36.055
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
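        // SSE2 lacks PMOVSXBW, so sign-extend the weights by comparing
        // against zero to build a sign mask and interleaving it with the
        // 8-bit values.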
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 7,685
| 37.238806
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
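        // Sign-extend int8 to int16 with the SSE2 idiom: duplicate each byte
        // into a 16-bit lane (unpack with itself), then arithmetic-shift
        // right by 8.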
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 7,775
| 37.686567
| 110
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
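    // _mm_packs_epi16 saturates to the int8 range, which already enforces the
    // upper bound (clamped earlier in the float domain); only the lower bound
    // needs an explicit clamp after packing.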
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,294
| 35.843434
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
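      // Remainder: kc was rounded up to a multiple of 2, so 2, 4 or 6
      // trailing k-values may remain; process them one 4x2 weight group at
      // a time.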
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
vacc1x0123 = _mm_add_epi32(vacc1x0123,
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 7,412
| 36.065
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
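      // wasm_i32x4_dot_i16x8 multiplies pairs of adjacent 16-bit lanes and
      // sums them into 32-bit lanes, the WAsm SIMD analogue of x86 PMADDWD.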
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
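      // Remainder: kc is rounded up to a multiple of 2, so 2, 4, or 6 bytes of K
      // may remain; handle them one 2-byte (one 16-bit pair) step at a time.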
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
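    // fp32 requantization via the magic-bias trick: after per-channel scaling,
    // adding vmagic_bias puts the value in a float range whose low mantissa bits
    // hold the rounded integer; the i32x4_max against vmagic_min doubles as the
    // lower output clamp, and subtracting (magic_bias - output_zero_point)
    // recovers the zero-point-shifted integer result.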
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
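    // Store a full 4-column int8 tile per row, or fall into the 2-/1-column
    // tail; the 16-bit shift walks the remaining bytes down within each lane
    // so the final single byte can be stored from a fixed lane index.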
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
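// ---------------------------------------------------------------------------
// Editor's sketch (not part of XNNPACK): a minimal scalar model of the
// requantization these QC8W igemm microkernels perform per output element.
// The function name and signature are hypothetical and do not reproduce the
// real kernels' packed-weight layout or params structs; it only illustrates
// the arithmetic: int32 accumulation (bias folded in), per-output-channel
// float rescaling, round-to-nearest, zero-point shift, and saturation.
#include <math.h>     // lrintf
#include <stdint.h>

static inline int8_t qc8w_requantize_sketch(
    int32_t acc,         // int32 dot-product accumulator (bias already added)
    float scale,         // per-output-channel scale
    int32_t zero_point,  // output zero point
    int32_t output_min,  // fused activation clamp, lower bound
    int32_t output_max)  // fused activation clamp, upper bound
{
  // Scale in float and round to nearest (ties to even under the default FP
  // environment), matching the SIMD float->int conversions in these kernels.
  long n = lrintf((float) acc * scale);
  n += zero_point;
  if (n < output_min) n = output_min;
  if (n > output_max) n = output_max;
  return (int8_t) n;
}
// ---------------------------------------------------------------------------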
| file_length: 6,880 | avg_line_length: 34.107143 | max_line_length: 134 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
const v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 += 8;
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const v128_t vxb0 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
if (k > 2 * sizeof(int8_t)) {
const v128_t vxb1 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
if (k > 4 * sizeof(int8_t)) {
const v128_t vxb2 = wasm_i16x8_load8x8(w);
w = (const void*) ((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
vacc1x0123 = wasm_i32x4_add(vacc1x0123,
wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 6,776 | avg_line_length: 33.93299 | max_line_length: 134 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-xop-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
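      // XOP: _mm_maddd_epi16 multiplies packed 16-bit values, sums adjacent
      // pairs, and adds the result to the third operand, fusing the separate
      // pmaddwd + paddd pair of the SSE4 variants into one instruction.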
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
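    // SSE4-style fp32 requantization: scale in float, clamp against
    // (output_max - zero_point) before converting, round with cvtps
    // (nearest-even), add the zero point with saturating 16-bit adds, pack to
    // int8, and apply the output_min clamp last.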
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 7,163 | avg_line_length: 34.465347 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2-minmax-fp32-xop-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
}
if (k != 0) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 = (const int8_t*) ((uintptr_t) a0 + k);
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 = (const int8_t*) ((uintptr_t) a1 + k);
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
if (k > 2 * sizeof(int8_t)) {
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
if (k > 4 * sizeof(int8_t)) {
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
w = (const void*) ((const int8_t*) w + 8);
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(
_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 7,281 | avg_line_length: 34.696078 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-avx-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
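  // c2s4 variants round kc up to 8 bytes, so the K loop below has no remainder
  // path; the XNN_OOB_READS annotation signals that reading slightly past the
  // logical row end is expected for this kernel's packed inputs.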
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
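      // c2s4 schedule: after each madd, rotate the activation vector by one
      // 32-bit lane (one pair of K values) so the next packed weight block
      // lines up with the next pair, avoiding per-step broadcast shuffles.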
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,426 | avg_line_length: 33.566879 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-avx-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,544 | avg_line_length: 33.874214 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-sse2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
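        // SSE2 has no pmovsxbw: sign-extend bytes to 16 bits by computing a
        // per-byte sign mask (0 or -1) and interleaving it with the data.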
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,699 | avg_line_length: 34.625 | max_line_length: 110 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-sse2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,789 | avg_line_length: 35.1875 | max_line_length: 110 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-sse41-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,428 | avg_line_length: 33.579618 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-sse41-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,546 | avg_line_length: 33.886792 | max_line_length: 108 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__wasmsimd_dot16x2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 += 8;
const v128_t vb01 = wasm_v128_load(w);
const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| file_length: 5,345 | avg_line_length: 32.4125 | max_line_length: 134 | extension_type: c |
| repo: XNNPACK | file: XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c |
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <wasm_simd128.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__wasmsimd_dot16x2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
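  // The c2s4 inner loop consumes a fixed 8 input bytes per iteration, so kc
  // is rounded up to a multiple of 8 and no remainder path is needed; the
  // XNN_OOB_READS annotation documents that reading a few bytes past the
  // logical end of an input row is expected and must be tolerated.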
kc = round_up_po2(kc, 8 * sizeof(int8_t));
do {
v128_t vacc0x0123 = wasm_v128_load(w);
v128_t vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
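      // The indirection list stores the shared `zero` buffer for taps that
      // fall outside the input image; a_offset is applied only to genuine
      // input rows, which is what the a0/a1 != zero tests above guard.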
size_t k = kc;
do {
v128_t vxa0 = wasm_i16x8_load8x8(a0);
a0 += 8;
v128_t vxa1 = wasm_i16x8_load8x8(a1);
a1 += 8;
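        // The "s4" in the kernel name: between dot products the input vector
        // is rotated left by one 32-bit lane (wasm_v32x4_shuffle with indices
        // 1,2,3,4 -- index 4 wraps to lane 0 when both operands are the same
        // vector), so each of the four weight vectors vxb0..vxb3 is paired
        // with a different 2-element slice of the 8 input bytes.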
const v128_t vxb0 = wasm_i16x8_load8x8(w);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);
vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
w = (const int8_t*) w + 32;
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
const v128_t vscale0123 = wasm_v128_load(w);
w = (const void*) ((const float*) w + 4);
vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc01x0123);
const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
vout = wasm_i8x16_min(vout, voutput_max);
if (nc >= 4) {
wasm_v128_store32_lane(c1, vout, 1);
wasm_v128_store32_lane(c0, vout, 0);
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
wasm_v128_store16_lane(c1, vout, 2);
c1 += 2;
wasm_v128_store16_lane(c0, vout, 0);
c0 += 2;
vout = wasm_u32x4_shr(vout, 16);
}
if (nc & 1) {
wasm_v128_store8_lane(c1, vout, 4);
wasm_v128_store8_lane(c0, vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,241
| 32.177215
| 134
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
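        // XOP's _mm_maddd_epi16(a, b, c) computes c + _mm_madd_epi16(a, b) in
        // one instruction (VPMADCSWD), fusing the 16-bit multiply-add with
        // the 32-bit accumulation that the SSE4-level kernels express as
        // separate pmaddwd + paddd steps.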
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
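    // fp32 requantization, SSE4 flavor: scale in float, clamp the upper bound
    // against output_max - output_zero_point while still in float, and let
    // _mm_cvtps_epi32 round to nearest-even. The zero point is then added
    // with a saturating 16-bit add after the first pack, and the lower bound
    // is enforced last on the packed int8 values with _mm_max_epi8.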
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,367
| 32.341615
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c2s4-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c2s4-sse.c.in
// Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c2s4__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
__m128i vacc1x0123 = vacc0x0123;
w = (const void*) ((const int32_t*) w + 4);
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
do {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
__m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
__m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
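        // ld64 flavor: weights are fetched 8 bytes at a time with
        // _mm_loadl_epi64 and widened with pmovsxbw, instead of the ld128
        // flavor's single 16-byte load split into low/high halves. XNNPACK
        // generates both so the better-performing one can be chosen per
        // microarchitecture.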
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
w = (const void*) ((const int8_t*) w + 32);
k -= 8 * sizeof(int8_t);
} while (k != 0);
p -= 2 * sizeof(void*);
} while (p != 0);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_loadu_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,485
| 32.656442
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-avx-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__avx_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
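    // c8 layout: every output column keeps its own 4-lane accumulator, seeded
    // with that column's int32 bias in lane 0. The k loop below adds partial
    // sums into all 4 lanes; the _mm_hadd_epi32 tree after the loops folds
    // them into one sum per column, packed as vaccNx0123.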
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,619
| 33.478528
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-avx-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__avx_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
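    // ks is the byte size of this output pixel's indirection window, asserted
    // above to be a multiple of 2 * sizeof(void*); each pass of the loop
    // below consumes two pointers from `a`, one per output row handled by
    // this kernel.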
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,741
| 33.383234
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-sse2-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__sse2_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
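        // SSE2 has no pmovsxbw; sign extension is done by building a sign
        // mask with _mm_cmpgt_epi8(0, v) (0xFF for negative bytes) and
        // interleaving it with the data bytes, so unpacklo/unpackhi yield the
        // low/high 8 values widened to int16.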
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
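    // SSE2 also lacks phaddd, so the four per-column accumulators are reduced
    // with an unpack/add transpose: two rounds of unpacklo/unpackhi plus adds
    // leave one 32-bit sum per output column in vaccNx0123.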
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
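    // With no _mm_max_epi8 in SSE2, the output minimum is applied to the
    // 16-bit values before the final pack; saturation in the pack and the
    // earlier float-side clamp take care of the upper bound.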
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 6,238
| 36.584337
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-sse2-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <emmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__sse2_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
a1 += 8;
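        // ld64 sign extension without pmovsxbw: unpacking a register with
        // itself places each byte in the high half of a 16-bit lane, and the
        // arithmetic shift right by 8 then sign-extends it in place.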
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
__m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
__m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi16(vout, 2);
*c0 = (int8_t) _mm_cvtsi128_si32(vout);
}
nc = 0;
}
} while (nc != 0);
}
| 6,332
| 36.696429
| 119
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-sse41-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__sse41_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
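    // The tail stores below rely on SSE4.1 extracts (pextrd/pextrb); the
    // SSE2 variants of this kernel emulate the single-byte store with
    // pextrw/movd instead, since pextrb only exists from SSE4.1 on.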
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,621
| 33.490798
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-sse41-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <smmintrin.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__sse41_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,743
| 33.39521
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-xop-ld128.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__xop_ld128(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb01 = _mm_load_si128((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,560
| 32.299401
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x4c8-minmax-fp32-xop-ld64.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/MRx4c8-sse.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#ifdef _MSC_VER
#include <intrin.h>
#else
#include <x86intrin.h>
#endif
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x4c8__xop_ld64(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 8 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
__m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
__m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
__m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
__m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
__m128i vacc1x0 = vacc0x0;
__m128i vacc1x1 = vacc0x1;
__m128i vacc1x2 = vacc0x2;
__m128i vacc1x3 = vacc0x3;
w = (const int32_t*) w + 4;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = 0;
while (k < kc) {
const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
a0 += 8;
const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
a1 += 8;
const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);
vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);
vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);
vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);
vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
w = (const void*) ((const int8_t*) w + 32);
k += 8 * sizeof(int8_t);
}
p -= 2 * sizeof(void*);
} while (p != 0);
const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
__m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
__m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
__m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
__m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
const __m128 vscale0123 = _mm_load_ps((const float*) w);
w = (const void*) ((const float*) w + 4);
vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale0123);
vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale0123);
const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
__m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
__m128i vout = _mm_packs_epi16(vacc01x0123, vacc01x0123);
vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));
if (nc >= 4) {
unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 4;
} else {
if (nc & 2) {
unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
c1 += 2;
unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
c0 += 2;
vout = _mm_srli_epi32(vout, 16);
}
if (nc & 1) {
*c1 = (int8_t) _mm_extract_epi8(vout, 4);
*c0 = (int8_t) _mm_extract_epi8(vout, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 5,682
| 32.233918
| 108
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8-minmax-fp32-neon-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8__neon_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
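        // Main loop: 8 bytes of kc per iteration. Widen each int8 input
        // vector to int16 once, then multiply-accumulate one 8-wide weight
        // row per input lane with vmlal_lane_s16.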
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
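        // Prefetch the weight stream 448 bytes ahead to hide load latency.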
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
k -= 8 * sizeof(int8_t);
}
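      // kc remainder of 1-7 bytes: each nested conditional consumes one more
      // 8-wide weight row for the corresponding input lane.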
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
}
}
}
}
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
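    // Magic-bias rounding: adding 2**23 + 2**22 to a float of small enough
    // magnitude leaves round-to-nearest(x) in the low mantissa bits; the
    // saturating subtract of (bias - output_zero_point) then recovers the
    // zero-point-adjusted integer result.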
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,214
| 51.647059
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8-minmax-fp32-neon-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8__neon_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
}
}
}
}
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
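    // Saturating narrow 32 -> 16 -> 8 bits; on AArch64 the *_high variants
    // pack both halves in a single instruction.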
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
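    // Store full 8-column rows, or peel the nc remainder in 4/2/1-byte
    // steps, rotating the vector with vextq_s8 between partial stores.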
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 15,126
| 51.707317
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8-minmax-fp32-neonv8-mlal-lane-prfm.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/prefetch.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8__neonv8_mlal_lane_prfm(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
xnn_prefetch_to_l1((const int8_t*) w + 448);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
}
}
}
}
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
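    // ARMv8 provides vcvtnq_s32_f32 (round to nearest, ties to even), so no
    // magic-bias trick is needed; the output zero point is added after
    // narrowing, with saturation.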
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,992
| 51.059028
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8-minmax-fp32-neonv8-mlal-lane.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/neon-mlal-lane.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8__neonv8_mlal_lane(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int16x8_t vxa0 = vmovl_s8(va0);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int16x8_t vxa1 = vmovl_s8(va1);
const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
if (k >= 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
if (k >= 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
if (k >= 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
if (k > 6 * sizeof(int8_t)) {
const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);
vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
}
}
}
}
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
// Post-accumulation work
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
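    // Per-channel (qc8w) scales follow the packed weights: two float32x4
    // loads cover the 8 output channels of this tile.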
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
vacc0x0123 = vcvtnq_s32_f32(vfpacc0x0123);
vacc0x4567 = vcvtnq_s32_f32(vfpacc0x4567);
vacc1x0123 = vcvtnq_s32_f32(vfpacc1x0123);
vacc1x4567 = vcvtnq_s32_f32(vfpacc1x4567);
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->fp32_neonv8.output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neonv8.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neonv8.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 14,904
| 51.115385
| 114
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c2-minmax-fp32-neon-mlal-dup.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/gemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_dup(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
while (k >= 16 * sizeof(int8_t)) {
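        // Doubly-unrolled main loop over 16 bytes of kc: broadcast each
        // 16-bit input pair across the vector (the "dup" variant), widen
        // the int8 products with vmull_s8/vmlal_s8, and accumulate pairwise
        // into int32 lanes with vpadalq_s16.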
const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 0));
const int8x8_t va0c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 0));
const int8x8_t va1c0x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 0));
const int8x8_t va1c0x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 0));
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 1));
const int8x8_t va0c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 1));
const int8x8_t va1c1x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 1));
const int8x8_t va1c1x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 1));
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 2));
const int8x8_t va0c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 2));
const int8x8_t va1c2x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 2));
const int8x8_t va1c2x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 2));
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x0), 3));
const int8x8_t va0c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0x1), 3));
const int8x8_t va1c3x0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x0), 3));
const int8x8_t va1c3x1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1x1), 3));
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 16 * sizeof(int8_t);
}
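      // 8..15 bytes of K left: one more pass over a single 8-byte block,
      // using plain vmull_s8 (no mlal pairing of two halves).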
if (k >= 8 * sizeof(int8_t)) {
const int8x8_t va0 = vld1_s8(a0); a0 += 8;
const int8x8_t va1 = vld1_s8(a1); a1 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3));
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 8 * sizeof(int8_t);
}
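      // Final 2, 4, or 6 bytes: kc is rounded up to a multiple of 2, so at
      // most three 2-byte pairs remain; one 8-byte packed-weight strip is
      // consumed per remaining pair.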
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
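    // Requantize (fp32 path): convert the int32 accumulators to float and
    // multiply by per-channel scales stored after the packed weights.
    // Rounding uses a magic-bias add (the rounded integer lands in the low
    // mantissa bits) and a saturating subtract that also folds in the output
    // zero point; conceptually:
    //   out = clamp(round(acc * scale) + zero_point, min, max)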
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
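    // Store the 2x8 tile: row 0 occupies the low half of the 16-byte output
    // vector and row 1 the high half (hence lane 8 for the last c1 byte).
    // Partial tiles peel nc as 4/2/1 lanes, shifting the vector with VEXT.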
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,123
| 53.329545
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c2-minmax-fp32-neon-mlal-ld1r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld1r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
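      // ld1r variant: each 2-byte input pair is broadcast straight from
      // memory with vld1_dup_s16 instead of one 8-byte load followed by
      // vdup_lane broadcasts; the multiply/accumulate structure matches the
      // dup variant above.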
while (k >= 16 * sizeof(int8_t)) {
const int16x4_t va00x0 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x0 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x0 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x0 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va00x1 = vld1_dup_s16((const void*)a0);
const int16x4_t va01x1 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02x1 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03x1 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va10x0 = vld1_dup_s16((const void*)a1);
const int16x4_t va11x0 = vld1_dup_s16((const void*)(a1 + 2));
const int16x4_t va12x0 = vld1_dup_s16((const void*)(a1 + 4));
const int16x4_t va13x0 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
const int16x4_t va10x1 = vld1_dup_s16((const void*)a1);
const int16x4_t va11x1 = vld1_dup_s16((const void*)(a1 + 2));
const int16x4_t va12x1 = vld1_dup_s16((const void*)(a1 + 4));
const int16x4_t va13x1 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1);
const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0);
const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va01x0);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va01x1);
const int8x8_t va1c1x0 = vreinterpret_s8_s16(va11x0);
const int8x8_t va1c1x1 = vreinterpret_s8_s16(va11x1);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va02x0);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va02x1);
const int8x8_t va1c2x0 = vreinterpret_s8_s16(va12x0);
const int8x8_t va1c2x1 = vreinterpret_s8_s16(va12x1);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va03x0);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va03x1);
const int8x8_t va1c3x0 = vreinterpret_s8_s16(va13x0);
const int8x8_t va1c3x1 = vreinterpret_s8_s16(va13x1);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4_t va00 = vld1_dup_s16((const void*)a0);
const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2));
const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4));
const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8;
const int16x4_t va10 = vld1_dup_s16((const void*)a1);
const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2));
const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4));
const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00);
const int8x8_t va1c0 = vreinterpret_s8_s16(va10);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va01);
const int8x8_t va1c1 = vreinterpret_s8_s16(va11);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va02);
const int8x8_t va1c2 = vreinterpret_s8_s16(va12);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va03);
const int8x8_t va1c3 = vreinterpret_s8_s16(va13);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 19,580
| 51.921622
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c2-minmax-fp32-neon-mlal-ld2r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld2r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
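      // ld2r variant: vld2_dup_s16 loads and broadcasts two interleaved
      // 2-byte input pairs per instruction (val[0] and val[1]); the rest of
      // the kernel matches the dup/ld1r variants above.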
while (k >= 16 * sizeof(int8_t)) {
const int16x4x2_t va00x0 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x0 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va00x1 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01x1 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va10x0 = vld2_dup_s16((const void*)a1);
const int16x4x2_t va11x0 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
const int16x4x2_t va10x1 = vld2_dup_s16((const void*)a1);
const int16x4x2_t va11x1 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va00x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va00x1.val[0]);
const int8x8_t va1c0x0 = vreinterpret_s8_s16(va10x0.val[0]);
const int8x8_t va1c0x1 = vreinterpret_s8_s16(va10x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va00x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va00x1.val[1]);
const int8x8_t va1c1x0 = vreinterpret_s8_s16(va10x0.val[1]);
const int8x8_t va1c1x1 = vreinterpret_s8_s16(va10x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va01x0.val[0]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va01x1.val[0]);
const int8x8_t va1c2x0 = vreinterpret_s8_s16(va11x0.val[0]);
const int8x8_t va1c2x1 = vreinterpret_s8_s16(va11x1.val[0]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va01x0.val[1]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va01x1.val[1]);
const int8x8_t va1c3x0 = vreinterpret_s8_s16(va11x0.val[1]);
const int8x8_t va1c3x1 = vreinterpret_s8_s16(va11x1.val[1]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 8 * sizeof(int8_t);
}
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}
| 18,940
| 51.907821
| 125
|
c
|
XNNPACK
|
XNNPACK-master/src/qs8-qc8w-igemm/gen/qs8-qc8w-igemm-2x8c2-minmax-fp32-neon-mlal-ld4r.c
|
// Auto-generated file. Do not edit!
// Template: src/qs8-igemm/c2-neon-mull-dup.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.
#include <assert.h>
#include <arm_neon.h>
#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
void xnn_qs8_qc8w_igemm_minmax_fp32_ukernel_2x8c2__neon_mlal_ld4r(
size_t mr,
size_t nc,
size_t kc,
size_t ks,
const int8_t** restrict a,
const void* restrict w,
int8_t* restrict c,
size_t cm_stride,
size_t cn_stride,
size_t a_offset,
const int8_t* zero,
const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
assert(mr != 0);
assert(mr <= 2);
assert(nc != 0);
assert(kc != 0);
assert(ks != 0);
assert(ks % (2 * sizeof(void*)) == 0);
assert(a_offset % sizeof(int8_t) == 0);
assert(a != NULL);
assert(w != NULL);
assert(c != NULL);
kc = round_up_po2(kc, 2 * sizeof(int8_t));
int8_t* c0 = c;
int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
if XNN_UNPREDICTABLE(mr != 2) {
c1 = c0;
}
do {
int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
int32x4_t vacc1x0123 = vacc0x0123;
int32x4_t vacc1x4567 = vacc0x4567;
size_t p = ks;
do {
const int8_t* restrict a0 = a[0];
if XNN_UNPREDICTABLE(a0 != zero) {
a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
}
const int8_t* restrict a1 = a[1];
if XNN_UNPREDICTABLE(a1 != zero) {
a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
}
a += 2;
size_t k = kc;
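      // ld4r variant: a single vld4_dup_s16 broadcasts all four 2-byte pairs
      // of an 8-byte K block at once (val[0]..val[3]); the rest of the
      // kernel matches the other 2x8c2 mlal variants above.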
while (k >= 16 * sizeof(int8_t)) {
const int16x4x4_t va0x0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8;
const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8;
const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]);
const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]);
const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]);
const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]);
int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0);
int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0);
const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1);
vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0);
int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0);
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1);
vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]);
const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]);
const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]);
const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]);
int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0);
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0);
const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1);
vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0);
int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0);
const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1);
vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]);
const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]);
const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]);
const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]);
int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0);
int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0);
const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1);
vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0);
int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0);
const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1);
vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]);
const int8x8_t va0c3x1 = vreinterpret_s8_s16(va0x1.val[3]);
const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]);
const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]);
int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0);
int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0);
const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1);
vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0);
int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0);
const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1);
vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 16 * sizeof(int8_t);
}
if (k >= 8 * sizeof(int8_t)) {
const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
const int16x4x4_t va1 = vld4_dup_s16((const void*)a1); a1 += 8;
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
const int8x8_t va1c0 = vreinterpret_s8_s16(va1.val[0]);
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
const int8x8_t va1c1 = vreinterpret_s8_s16(va1.val[1]);
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
const int8x8_t va1c3 = vreinterpret_s8_s16(va1.val[3]);
const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
k -= 8 * sizeof(int8_t);
}
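      // Final remainder of 2, 4, or 6 bytes of K (kc is rounded up to a multiple of 2), processed one 2-byte slice at a time.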
if XNN_UNLIKELY(k != 0) {
const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
if (k > 2 * sizeof(int8_t)) {
const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
if (k > 4 * sizeof(int8_t)) {
const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
}
}
}
p -= 2 * sizeof(void*);
} while (p != 0);
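    // Requantize: convert the int32 accumulators to float and apply the per-output-channel scales.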
float32x4_t vfpacc0x0123 = vcvtq_f32_s32(vacc0x0123);
float32x4_t vfpacc0x4567 = vcvtq_f32_s32(vacc0x4567);
float32x4_t vfpacc1x0123 = vcvtq_f32_s32(vacc1x0123);
float32x4_t vfpacc1x4567 = vcvtq_f32_s32(vacc1x4567);
const float32x4_t vscale0123 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x0123 = vmulq_f32(vfpacc0x0123, vscale0123);
vfpacc1x0123 = vmulq_f32(vfpacc1x0123, vscale0123);
const float32x4_t vscale4567 = vld1q_f32((const float*) w); w = (const void*) ((const float*) w + 4);
vfpacc0x4567 = vmulq_f32(vfpacc0x4567, vscale4567);
vfpacc1x4567 = vmulq_f32(vfpacc1x4567, vscale4567);
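    // Round-to-nearest float->int conversion via the magic-bias addition, then subtract (magic bias - output zero point) with signed saturation.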
    const float32x4_t vmagic_bias = vld1q_dup_f32(&params->fp32_neon.magic_bias);
vacc0x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x0123, vmagic_bias));
vacc0x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc0x4567, vmagic_bias));
vacc1x0123 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x0123, vmagic_bias));
vacc1x4567 = vreinterpretq_s32_f32(vaddq_f32(vfpacc1x4567, vmagic_bias));
    const int32x4_t vmagic_bias_less_output_zero_point = vld1q_dup_s32(&params->fp32_neon.magic_bias_less_output_zero_point);
vacc0x0123 = vqsubq_s32(vacc0x0123, vmagic_bias_less_output_zero_point);
vacc0x4567 = vqsubq_s32(vacc0x4567, vmagic_bias_less_output_zero_point);
vacc1x0123 = vqsubq_s32(vacc1x0123, vmagic_bias_less_output_zero_point);
vacc1x4567 = vqsubq_s32(vacc1x4567, vmagic_bias_less_output_zero_point);
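    // Saturating narrow int32 -> int16 -> int8; row 0 packs into the low half of the result, row 1 into the high half.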
#if XNN_ARCH_ARM64
int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
#else
int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
#endif
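    // Clamp to the quantized activation range.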
    const int8x16_t voutput_min = vld1q_dup_s8(&params->fp32_neon.output_min);
vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->fp32_neon.output_max);
vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
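    // Store a full 8-column tile per row, or handle the 4/2/1-column tail with lane stores.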
if (nc >= 8) {
vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));
c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
a = (const int8_t**restrict) ((uintptr_t) a - ks);
nc -= 8;
} else {
if (nc & 4) {
vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
}
if (nc & 2) {
vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
}
if (nc & 1) {
vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
}
nc = 0;
}
} while (nc != 0);
}