repo
stringlengths
1
152
file
stringlengths
14
221
code
stringlengths
501
25k
file_length
int64
501
25k
avg_line_length
float64
20
99.5
max_line_length
int64
21
134
extension_type
stringclasses
2 values
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 (signed 8-bit) indirect GEMM microkernel, MR=3 rows x NR=4 columns,
// fp32 requantization with min/max clamping. "c2s4": K is consumed in groups
// of 8 as four 2-element MACs, rotating the activation lanes between MACs.
// "avx_ld64": AVX ISA (VEX-encoded SSE4.1 intrinsics), 64-bit weight loads.
// `a` is an indirection array of row pointers (ks entries per output pixel);
// rows pointing at `zero` are padding and are NOT offset by a_offset.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The K loop below consumes exactly 8 int8 elements per iteration, so the
  // packed weights are assumed padded up to a multiple of 8.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Output row pointers; rows beyond `mr` alias the previous row so the
  // stores below are unconditional (extra rows write duplicate, unused data).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Accumulators start from the per-channel int32 bias stored ahead of the
    // packed weights; all three rows share the same bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 3 row pointers from the indirection buffer. Pointers
      // equal to `zero` are padding rows and keep the shared zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 activations per row and sign-extend int8 -> int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // 4 MAC steps: each multiplies a 2-element (c2) activation group
        // against 8 packed weights via PMADDWD, then rotates the activation
        // 32-bit lanes so the next step uses the next group (s4 shuffle).
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        // Last step needs no rotation: the activation registers are reloaded
        // on the next iteration.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: int32 accumulator -> float, scale, clamp the top
    // end in float space (max folded with -zero_point), convert back with
    // round-to-nearest-even.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with int16 saturation, then pack rows
    // 0|1 and 2|2 down to int8. Lower clamp is applied on packed int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: store one 32-bit lane per row (row r lives in
      // lane r of vout). Rows are stored highest-first.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer to reuse the same pixel pointers for
      // the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile (1-3 columns): store 2 bytes then shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,999
36.433155
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM microkernel, MR=3 x NR=4, fp32 requantization.
// "c2s4": K consumed 8 at a time as four 2-element MACs with lane rotation.
// "sse2_ld128": SSE2-only ISA, 128-bit weight loads; int8 sign extension is
// done manually (SSE2 has no pcmovsxbw).
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Weights are packed in full groups of 8 K-elements.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Row pointers; out-of-range rows alias the previous row (harmless stores).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed accumulators with the shared per-channel bias preceding the weights.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next 3 indirection pointers; `zero` entries are padding rows and do
      // not get a_offset applied.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // SSE2 sign extension: duplicate bytes into the high half, then
        // arithmetic-shift right by 8 to produce int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // Load 16 weight bytes at once; build a sign mask and interleave to
        // sign-extend both halves (vxb0 = low 8 weights, vxb1 = high 8).
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
        const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

        // 4 PMADDWD MAC steps; activation 32-bit lanes rotate between steps.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
        const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
        const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        // Final step: no rotation needed, activations reload next iteration.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp upper bound in float space,
    // convert back with round-to-nearest-even.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add zero point with int16 saturation; clamp the lower bound while still
    // in int16 (SSE2 lacks pmaxsb), then pack to int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full tile: broadcast-shuffle the wanted 32-bit lane into lane 0,
      // then movd it out (SSE2 has no pextrd).
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind indirection buffer for the next column tile of the same pixels.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: pairs first (then shift bytes down), then single byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,279
37.518519
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM microkernel, MR=3 x NR=4, fp32 requantization.
// "c2s4": K consumed 8 at a time as four 2-element MACs with lane rotation.
// "sse2_ld64": SSE2-only ISA, 64-bit weight loads; int8 sign extension is
// done with unpack+arithmetic-shift (SSE2 has no pcmovsxbw).
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Packed weights cover K in full groups of 8 int8 elements.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Row pointers; rows beyond `mr` alias the previous row (duplicate stores).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed all 3 row accumulators with the per-channel int32 bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next 3 indirection pointers; `zero` entries stay un-offset (padding).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Sign-extend 8 activations per row: unpack byte with itself, then
        // arithmetic shift right by 8.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // 4 MAC steps: 8 weight bytes per step, sign-extended the same way;
        // activation 32-bit lanes rotate after each step (s4 shuffle).
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);

        // Final step needs no rotation.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp upper bound in float space,
    // round-to-nearest-even back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add zero point with saturation; lower clamp is applied in int16 space
    // (SSE2 lacks pmaxsb), then pack to int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full tile: shuffle the target 32-bit lane down to lane 0, movd out.
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: 2-byte stores then shift, then the last single byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,369
37.994709
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM microkernel, MR=3 x NR=4, fp32 requantization.
// "c2s4": K consumed 8 at a time as four 2-element MACs with lane rotation.
// "sse41_ld128": SSE4.1 ISA, 128-bit weight loads; pmovsxbw sign-extends the
// low half, unpackhi+srai handles the high half.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Packed weights cover K in full groups of 8 int8 elements.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Row pointers; rows beyond `mr` alias the previous row (duplicate stores).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed all 3 row accumulators with the per-channel int32 bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next 3 indirection pointers; `zero` entries stay un-offset (padding).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load and sign-extend 8 activations per row (pmovsxbw).
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // 16 weight bytes per load: pmovsxbw extends the low 8, the high 8
        // use unpackhi-with-self + arithmetic shift.
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        // 4 PMADDWD MAC steps; activation 32-bit lanes rotate between steps.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        // Final step needs no rotation.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp upper bound (max folded with
    // -zero_point) in float space, round-to-nearest-even back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add zero point with int16 saturation, pack to int8, then clamp the
    // lower bound on packed bytes (pmaxsb is available on SSE4.1).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: row r occupies 32-bit lane r of vout.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: 2-byte stores then shift, then the last single byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,883
36.210811
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) microkernel: 3 output rows (MR=3) x 4 output
// columns (NR=4), processing inputs 2 channels at a time with a 4-way rotation
// (c2s4), using SSE4.1 with 64-bit weight loads (ld64) and fp32-based
// requantization with min/max output clamping.
//
// Arguments (standard XNNPACK IGEMM contract):
//   mr        - number of valid output rows (1..3); extra row pointers alias
//   nc        - number of output columns remaining
//   kc        - number of input channels (rounded up to 8 bytes below)
//   ks        - byte size of one indirection-buffer slice (3 pointers/pixel)
//   a         - indirection buffer of input-row pointers
//   w         - packed weights: 4 int32 biases, then int8 weights in 32-byte groups
//   c         - output pointer
//   cm_stride - byte stride between output rows
//   cn_stride - byte stride between 4-column output groups
//   a_offset  - byte offset applied to every non-zero input pointer
//   zero      - pointer to the zero buffer used for padded taps
//   params    - requantization parameters (fp32_sse4 variant)
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Output row pointers; rows beyond mr alias the previous row so their stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Initialize all three row accumulators with the packed int32 bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next input-row pointers from the indirection buffer;
      // a_offset is applied only to real rows, never to the zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 input bytes per row and sign-extend to 16 bits.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // Four 8-byte weight loads; after each multiply-add the input vector is
        // rotated one 32-bit lane so the next weight block sees the next channel pair.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        // Last weight block: no rotation needed afterwards.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: convert to float, scale, clamp from above, round back.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating 16-bit arithmetic, then pack to
    // int8 and apply the lower bound (the upper bound was applied in fp32 above).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store; rows are written highest-first (c2, c1, c0).
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer so the next column group reuses the same inputs.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
7,001
36.44385
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel: 3 output rows (MR=3) x 4 output
// columns (NR=4), c2s4 input rotation, WebAssembly SIMD with the i16x8
// dot-product instruction and 128-bit weight loads (ld128), fp32 requantization
// via the magic-bias rounding trick.
//
// Arguments follow the standard XNNPACK IGEMM contract (see sibling kernels):
// a is the indirection buffer (3 pointers per output pixel), w holds 4 int32
// biases followed by int8 weights in 32-byte groups, zero marks padded taps.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Output row pointers; rows beyond mr alias the previous row so their stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize all three row accumulators with the packed int32 bias.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next input-row pointers; a_offset is applied only to real
      // rows, never to the zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 input bytes per row, sign-extended to 16 bits.
        v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;

        // 16-byte weight load covering two 8-byte blocks; sign-extend the low
        // and high halves separately.
        const v128_t vb01 = wasm_v128_load(w);
        const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
        const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);

        // After each dot-product the input vector is rotated one 32-bit lane
        // so the next weight block sees the next channel pair.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
        const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
        const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);

        // Last weight block: no rotation needed afterwards.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));

        w = (const int8_t*) w + 32;
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization via the magic-bias trick: scale in float, add a
    // magic bias so the integer result sits in the low mantissa bits, clamp,
    // then subtract (magic bias - output zero point) in integer arithmetic.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    // Lower output bound applied while still in magic-bias form.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> int8 with saturation, then apply the upper bound.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store; rows are written highest-first (c2, c1, c0).
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer so the next column group reuses the same inputs.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
6,749
34.904255
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel: 3 output rows (MR=3) x 4 output
// columns (NR=4), c2s4 input rotation, WebAssembly SIMD with the i16x8
// dot-product instruction and 64-bit weight loads (ld64), fp32 requantization
// via the magic-bias rounding trick.
//
// Arguments follow the standard XNNPACK IGEMM contract (see sibling kernels):
// a is the indirection buffer (3 pointers per output pixel), w holds 4 int32
// biases followed by int8 weights in 32-byte groups, zero marks padded taps.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // Output row pointers; rows beyond mr alias the previous row so their stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize all three row accumulators with the packed int32 bias.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next input-row pointers; a_offset is applied only to real
      // rows, never to the zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 input bytes per row, sign-extended to 16 bits.
        v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;

        // Four 8-byte weight loads (sign-extending); after each dot-product the
        // input vector is rotated one 32-bit lane so the next weight block sees
        // the next channel pair.
        const v128_t vxb0 = wasm_i16x8_load8x8(w);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

        // Last weight block: no rotation needed afterwards.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));

        w = (const int8_t*) w + 32;
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization via the magic-bias trick: scale in float, add a
    // magic bias so the integer result sits in the low mantissa bits, clamp,
    // then subtract (magic bias - output zero point) in integer arithmetic.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);

    // Lower output bound applied while still in magic-bias form.
    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> int8 with saturation, then apply the upper bound.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc22x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc2x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc22x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store; rows are written highest-first (c2, c1, c0).
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer so the next column group reuses the same inputs.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;
        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
6,645
34.731183
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) microkernel: 3 output rows (MR=3) x 4 output
// columns (NR=4), c2s4 input rotation, AMD XOP with 128-bit weight loads
// (ld128) and fp32-based requantization with min/max output clamping.
// XOP's _mm_maddd_epi16 fuses the multiply-add with the accumulator add.
//
// Arguments follow the standard XNNPACK IGEMM contract (see sibling kernels):
// a is the indirection buffer (3 pointers per output pixel), w holds 4 int32
// biases followed by int8 weights in 32-byte groups, zero marks padded taps.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Output row pointers; rows beyond mr alias the previous row so their stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Initialize all three row accumulators with the packed int32 bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next input-row pointers; a_offset is applied only to real
      // rows, never to the zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 input bytes per row and sign-extend to 16 bits.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // 16-byte weight load covering two 8-byte blocks; the low half is
        // sign-extended directly, the high half via unpack + arithmetic shift.
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        // After each fused multiply-add-accumulate the input vector is rotated
        // one 32-bit lane so the next weight block sees the next channel pair.
        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));

        // Last weight block: no rotation needed afterwards.
        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: convert to float, scale, clamp from above, round back.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating 16-bit arithmetic, then pack to
    // int8 and apply the lower bound (the upper bound was applied in fp32 above).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store; rows are written highest-first (c2, c1, c0).
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer so the next column group reuses the same inputs.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
6,766
34.804233
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c2s4-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) microkernel: 3 output rows (MR=3) x 4 output
// columns (NR=4), c2s4 input rotation, AMD XOP with 64-bit weight loads (ld64)
// and fp32-based requantization with min/max output clamping.
// XOP's _mm_maddd_epi16 fuses the multiply-add with the accumulator add.
//
// Arguments follow the standard XNNPACK IGEMM contract (see sibling kernels):
// a is the indirection buffer (3 pointers per output pixel), w holds 4 int32
// biases followed by int8 weights in 32-byte groups, zero marks padded taps.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c2s4__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Output row pointers; rows beyond mr alias the previous row so their stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Initialize all three row accumulators with the packed int32 bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next input-row pointers; a_offset is applied only to real
      // rows, never to the zero buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        // Load 8 input bytes per row and sign-extend to 16 bits.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // Four 8-byte weight loads; after each fused multiply-add-accumulate
        // the input vector is rotated one 32-bit lane so the next weight block
        // sees the next channel pair.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        // Last weight block: no rotation needed afterwards.
        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: convert to float, scale, clamp from above, round back.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating 16-bit arithmetic, then pack to
    // int8 and apply the lower bound (the upper bound was applied in fp32 above).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store; rows are written highest-first (c2, c1, c0).
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer so the next column group reuses the same inputs.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
6,884
35.04712
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 output rows x NR=4 output
// columns, consuming K in chunks of 8 int8 values per column ("c8"),
// implemented with SSE4.1-level intrinsics (AVX encoding) and 128-bit
// weight loads ("ld128").  Accumulates in int32, requantizes through fp32
// (fp32_sse4 params), and emits saturated, clamped int8 outputs.
//
// a         - indirection buffer: ks groups of mr input-row pointers
// w         - packed weights: 4 int32 biases, then int8 weight bytes
// c         - output, rows cm_stride bytes apart, column tiles cn_stride apart
// zero      - sentinel row pointer; rows equal to it skip the a_offset add
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is processed 8 bytes at a time; XNN_OOB_READS documents that reads
  // (never writes) may go past logical buffer ends after this rounding.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // mr == 1: row 1 aliases row 0 (stores overwrite harmlessly)
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // mr <= 2: row 2 aliases row 1
  }

  do {
    // Seed one accumulator per (row, column) pair with the int32 bias for
    // that column; rows 1 and 2 start from the same biases as row 0.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next 3 row pointers from the indirection buffer; only
      // real rows (not the shared zero row) get the a_offset adjustment.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row and sign-extend to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // One 128-bit load covers weight columns 0 and 1; the high half is
        // sign-extended via unpackhi+arithmetic shift.
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        // madd multiplies int16 pairs and adds adjacent products -> int32.
        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse each per-column accumulator to one
    // lane, gathering columns 0..3 of a row into a single vector.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp above, round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with int16 saturation, then pack rows
    // [0,1] and [2,2] down to int8; row 2 is duplicated as pack filler.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    // Lower clamp applied on packed int8 lanes (SSE4.1 _mm_max_epi8).
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: store 4 bytes per row, advance output pointers,
      // and rewind the indirection buffer for the next column tile.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,136
35.78866
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 rows x NR=4 columns, K in
// chunks of 8 int8 values ("c8"), SSE4.1-level intrinsics (AVX encoding)
// with 64-bit weight loads ("ld64" - one _mm_loadl_epi64 per column).
// Accumulates in int32, requantizes through fp32 (fp32_sse4 params), and
// emits saturated, clamped int8 outputs.
//
// a         - indirection buffer: ks groups of mr input-row pointers
// w         - packed weights: 4 int32 biases, then int8 weight bytes
// c         - output, rows cm_stride bytes apart, column tiles cn_stride apart
// zero      - sentinel row pointer; rows equal to it skip the a_offset add
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is processed 8 bytes at a time; reads may extend past logical ends
  // (XNN_OOB_READS), writes never do.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // mr == 1: row 1 aliases row 0
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // mr <= 2: row 2 aliases row 1
  }

  do {
    // Seed one accumulator per (row, column) pair from the int32 biases.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next 3 row pointers; the shared zero row skips a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row and sign-extend to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // Each weight column is an 8-byte load, sign-extended to int16;
        // madd produces pairwise int32 dot-product partials.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: gather columns 0..3 of each row into one vector.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp above, round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add output zero point with int16 saturation, then pack to int8; row 2
    // is duplicated in its pack as filler.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    // Lower clamp on packed int8 lanes (SSE4.1 _mm_max_epi8).
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: 4 bytes per row, then rewind the indirection
      // buffer for the next column tile.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,258
35.661616
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 rows x NR=4 columns, K in
// chunks of 8 int8 values ("c8"), baseline SSE2 intrinsics with 128-bit
// weight loads ("ld128").  SSE2 lacks cvtepi8_epi16, so sign extension is
// done with unpack + arithmetic shift / sign-mask tricks, and the int8
// lower clamp is applied in int16 before the final pack (no _mm_max_epi8).
//
// a         - indirection buffer: ks groups of mr input-row pointers
// w         - packed weights: 4 int32 biases, then int8 weight bytes
// c         - output, rows cm_stride bytes apart, column tiles cn_stride apart
// zero      - sentinel row pointer; rows equal to it skip the a_offset add
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is processed 8 bytes at a time; reads may extend past logical ends
  // (XNN_OOB_READS), writes never do.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // mr == 1: row 1 aliases row 0
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // mr <= 2: row 2 aliases row 1
  }

  do {
    // Seed one accumulator per (row, column) pair from the int32 biases.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next 3 row pointers; the shared zero row skips a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row; sign-extend to int16 via
        // self-unpack + 8-bit arithmetic right shift (SSE2 idiom).
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // 128-bit load covers weight columns 0 and 1; cmpgt against zero
        // yields a per-byte sign mask used to widen both halves to int16.
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
        const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

        // madd multiplies int16 pairs and adds adjacent products -> int32.
        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
        const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
        const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction without SSSE3 hadd: transpose-style
    // unpacklo/unpackhi + add pairs collapse each per-column accumulator,
    // ending with columns 0..3 of a row in one vector.
    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
    const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
    const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
    const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));

    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    __m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
    __m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));

    // fp32 requantization: scale, clamp above, round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add output zero point with int16 saturation; row 2 is duplicated in
    // its pack as filler.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    // SSE2 has no signed-byte max, so apply the lower clamp in int16
    // before the saturating pack to int8.
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full 4-column tile: shuffle the desired 32-bit lane into position 0
      // (SSE2 lacks _mm_extract_epi32), store 4 bytes per row, then rewind
      // the indirection buffer for the next column tile.
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // No _mm_extract_epi8 in SSE2: read a 16-bit lane (or lane 0 via
        // cvtsi128_si32) and truncate to the low byte.
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,053
39.676768
119
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 rows x NR=4 columns, K in
// chunks of 8 int8 values ("c8"), baseline SSE2 intrinsics with 64-bit
// weight loads ("ld64" - one _mm_loadl_epi64 per column).  Sign extension
// uses the SSE2 unpack + arithmetic-shift idiom; the int8 lower clamp is
// applied in int16 before the final pack (SSE2 has no _mm_max_epi8).
//
// a         - indirection buffer: ks groups of mr input-row pointers
// w         - packed weights: 4 int32 biases, then int8 weight bytes
// c         - output, rows cm_stride bytes apart, column tiles cn_stride apart
// zero      - sentinel row pointer; rows equal to it skip the a_offset add
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__sse2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is processed 8 bytes at a time; reads may extend past logical ends
  // (XNN_OOB_READS), writes never do.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // mr == 1: row 1 aliases row 0
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // mr <= 2: row 2 aliases row 1
  }

  do {
    // Seed one accumulator per (row, column) pair from the int32 biases.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next 3 row pointers; the shared zero row skips a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row; sign-extend to int16 via
        // self-unpack + 8-bit arithmetic right shift (SSE2 idiom).
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // Each weight column is an 8-byte load, widened the same way;
        // madd produces pairwise int32 dot-product partials.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);

        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction without SSSE3 hadd: transpose-style
    // unpacklo/unpackhi + add pairs collapse each per-column accumulator,
    // ending with columns 0..3 of a row in one vector.
    const __m128i vacc0x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x0, vacc0x2), _mm_unpackhi_epi32(vacc0x0, vacc0x2));
    const __m128i vacc0x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x1, vacc0x3), _mm_unpackhi_epi32(vacc0x1, vacc0x3));
    const __m128i vacc1x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x0, vacc1x2), _mm_unpackhi_epi32(vacc1x0, vacc1x2));
    const __m128i vacc1x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x1, vacc1x3), _mm_unpackhi_epi32(vacc1x1, vacc1x3));
    const __m128i vacc2x02 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x0, vacc2x2), _mm_unpackhi_epi32(vacc2x0, vacc2x2));
    const __m128i vacc2x13 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x1, vacc2x3), _mm_unpackhi_epi32(vacc2x1, vacc2x3));

    __m128i vacc0x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc0x02, vacc0x13), _mm_unpackhi_epi32(vacc0x02, vacc0x13));
    __m128i vacc1x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc1x02, vacc1x13), _mm_unpackhi_epi32(vacc1x02, vacc1x13));
    __m128i vacc2x0123 = _mm_add_epi32(_mm_unpacklo_epi32(vacc2x02, vacc2x13), _mm_unpackhi_epi32(vacc2x02, vacc2x13));

    // fp32 requantization: scale, clamp above, round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add output zero point with int16 saturation; row 2 is duplicated in
    // its pack as filler.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    // SSE2 has no signed-byte max, so apply the lower clamp in int16
    // before the saturating pack to int8.
    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full 4-column tile: shuffle the desired 32-bit lane into position 0
      // (SSE2 lacks _mm_extract_epi32), store 4 bytes per row, then rewind
      // the indirection buffer for the next column tile.
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile (nc in 1..3): store 2 bytes, shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        // No _mm_extract_epi8 in SSE2: read a 16-bit lane (or lane 0 via
        // cvtsi128_si32) and truncate to the low byte.
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,147
39.74
119
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=3 output rows x NR=4 output
// columns, consuming 8 signed-8-bit input channels per step (c8).
// SSE4.1 variant with 128-bit loads of the packed weights (ld128).
// Accumulates in int32, requantizes via fp32 multiply, and clamps to int8.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; the packed buffers are padded to match.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));

  // Output row pointers; rows beyond 'mr' alias the previous row so their
  // (redundant) stores stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed one accumulator per (row, column) pair with the packed bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next trio of indirection pointers; the sentinel 'zero'
      // buffer (padding taps) must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row and sign-extend to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // One 128-bit load covers columns 0 and 1 (8 weights each);
        // low half via sign extension, high half via unpack + arithmetic shift.
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        // Second 128-bit load covers columns 2 and 3.
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the 4 partial sums of each (row, column) pair down
    // to one vector of 4 int32 column sums per row.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp from above, round-convert to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating int16 arithmetic, pack to
    // int8, then apply the lower clamp (upper clamp was applied in fp32).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store per row (rows stored 2, 1, 0).
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: emit 2 and/or 1 trailing column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,138
35.798969
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=3 output rows x NR=4 output
// columns, 8 signed-8-bit input channels per step (c8).
// SSE4.1 variant with 64-bit loads of the packed weights (ld64).
// Accumulates in int32, requantizes via fp32 multiply, and clamps to int8.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; the packed buffers are padded to match.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));

  // Output row pointers; rows beyond 'mr' alias the previous row so their
  // (redundant) stores stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed one accumulator per (row, column) pair with the packed bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next trio of indirection pointers; the sentinel 'zero'
      // buffer (padding taps) must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row and sign-extend to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // One 64-bit load per output column (8 int8 weights), sign-extended
        // to int16; _mm_madd_epi16 gives 4 paired int32 partial sums.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the 4 partial sums of each (row, column) pair down
    // to one vector of 4 int32 column sums per row.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp from above, round-convert to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating int16 arithmetic, pack to
    // int8, then apply the lower clamp (upper clamp was applied in fp32).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store per row (rows stored 2, 1, 0).
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: emit 2 and/or 1 trailing column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,260
35.671717
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-ssse3-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=3 output rows x NR=4 output
// columns, 8 signed-8-bit input channels per step (c8).
// SSSE3 variant with 128-bit loads of the packed weights (ld128); without
// SSE4.1, sign extension is done via compare/unpack and the output clamp is
// applied with int16 max before the final int8 pack.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__ssse3_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; the packed buffers are padded to match.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));

  // Output row pointers; rows beyond 'mr' alias the previous row so their
  // (redundant) stores stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed one accumulator per (row, column) pair with the packed bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next trio of indirection pointers; the sentinel 'zero'
      // buffer (padding taps) must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row; sign-extend to int16 by duplicating
        // each byte and arithmetic-shifting right (no _mm_cvtepi8_epi16 pre-SSE4.1).
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // One 128-bit load covers columns 0 and 1; sign-extend by unpacking
        // with a computed sign mask (0 > b).
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01);
        const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01);
        const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        // Second 128-bit load covers columns 2 and 3.
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23);
        const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23);
        const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the 4 partial sums of each (row, column) pair down
    // to one vector of 4 int32 column sums per row.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp from above, round-convert to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating int16 arithmetic; apply the
    // lower clamp in int16 (no _mm_max_epi8 pre-SSE4.1), then pack to int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full 4-column tile: broadcast the wanted lane to position 0 via
      // shuffle, then store 4 bytes per row (rows stored 2, 1, 0).
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: emit 2 and/or 1 trailing column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,535
37.060606
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-ssse3-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <tmmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=3 output rows x NR=4 output
// columns, 8 signed-8-bit input channels per step (c8).
// SSSE3 variant with 64-bit loads of the packed weights (ld64); without
// SSE4.1, sign extension is done via duplicate-unpack + arithmetic shift and
// the output clamp is applied with int16 max before the final int8 pack.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__ssse3_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  // K is consumed 8 bytes at a time; the packed buffers are padded to match.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));

  // Output row pointers; rows beyond 'mr' alias the previous row so their
  // (redundant) stores stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed one accumulator per (row, column) pair with the packed bias.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next trio of indirection pointers; the sentinel 'zero'
      // buffer (padding taps) must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 int8 inputs per row; sign-extend to int16 by duplicating
        // each byte and arithmetic-shifting right.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8);
        a2 += 8;

        // One 64-bit load per output column (8 int8 weights), sign-extended
        // the same way; _mm_madd_epi16 gives 4 paired int32 partial sums.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8);

        vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
        vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
        vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8);

        vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
        vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
        vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8);

        vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
        vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
        vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8);

        vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
        vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
        vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the 4 partial sums of each (row, column) pair down
    // to one vector of 4 int32 column sums per row.
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: scale, clamp from above, round-convert to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with saturating int16 arithmetic; apply the
    // lower clamp in int16, then pack to int8.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min);
    vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min);
    vacc22x0123 = _mm_max_epi16(vacc22x0123, voutput_min);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    if (nc >= 4) {
      // Full 4-column tile: broadcast the wanted lane to position 0 via
      // shuffle, then store 4 bytes per row (rows stored 2, 1, 0).
      unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2))));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1))));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder tile: emit 2 and/or 1 trailing column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi16(vout, 4);
        *c1 = (int8_t) _mm_extract_epi16(vout, 2);
        *c0 = (int8_t) _mm_cvtsi128_si32(vout);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,629
37.15
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 output rows x NR=4 output
// columns, consuming 8 signed 8-bit input channels per step (c8), using the
// XOP _mm_maddd_epi16 fused multiply-accumulate and 128-bit weight loads
// (ld128). Accumulators are requantized with the fp32 ("fp32_sse4") scheme.
//
// Arguments (standard XNNPACK IGEMM contract, as exercised below):
//   mr, nc, kc - rows (<= 3), columns, and channels to process
//   ks         - byte size of one pass over the indirection buffer
//                (a multiple of 3 pointers)
//   a          - indirection buffer of row-pointer triples
//   w          - packed weights: 4 int32 biases, then int8 weights
//   c          - output pointer; cm_stride between rows, cn_stride between
//                4-column tiles
//   a_offset   - byte offset applied to every A pointer EXCEPT `zero`
//   zero       - sentinel pointer marking padding taps (left un-offset)
//   params     - fp32_sse4 requantization constants (scale, clamps, zero point)
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Packed weights cover kc rounded up to a multiple of 8 channels.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row 1 not requested: alias onto row 0 so stores stay in-bounds.
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    // Row 2 not requested: alias onto row 1.
    c2 = c1;
  }

  do {
    // One scalar (lane-0) accumulator per output column, seeded from the
    // packed int32 biases; rows 1 and 2 start from the same biases.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next triple of row pointers from the indirection buffer;
      // the `zero` sentinel is used as-is (no a_offset).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 channels per row and sign-extend int8 -> int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // One 128-bit load holds columns 0 and 1 (8 int8 each); low half is
        // sign-extended directly, the high half via unpack + arithmetic shift.
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
        vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
        vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
        vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
        vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
        vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
        vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
        vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
        vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
        vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
        vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the four per-column accumulators of each row into a
    // single int32x4 vector (one lane per output column).
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: int32 -> float, scale, clamp above, back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with int16 saturation, then narrow to int8;
    // row 2 is duplicated to fill its int16 vector.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    // Lower clamp (the upper clamp was applied in float above).
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: row r lives in 32-bit lane r of vout.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tile: store the remaining 2 and/or 1 column(s).
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,021
34.464646
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x4c8-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 output rows x NR=4 output
// columns, 8 signed 8-bit input channels per step (c8), XOP
// _mm_maddd_epi16 multiply-accumulate with 64-bit weight loads (ld64 — one
// column per load, unlike the ld128 variant which loads two columns at once).
// Accumulators are requantized with the fp32 ("fp32_sse4") scheme.
//
// Arguments follow the standard XNNPACK IGEMM contract: `a` is an
// indirection buffer of row-pointer triples (ks bytes per pass), `zero`
// marks padding taps that must not receive a_offset, `w` holds 4 int32
// biases followed by packed int8 weights, and `c` is written with
// cm_stride/cn_stride spacing.
void xnn_qs8_igemm_minmax_fp32_ukernel_3x4c8__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Packed weights cover kc rounded up to a multiple of 8 channels.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row 1 not requested: alias onto row 0.
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    // Row 2 not requested: alias onto row 1.
    c2 = c1;
  }

  do {
    // One scalar (lane-0) accumulator per output column, seeded from the
    // packed int32 biases; rows 1 and 2 start from the same biases.
    __m128i vacc0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const int32_t*) w + 4;

    size_t p = ks;
    do {
      // Fetch the next triple of row pointers; `zero` is used un-offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Load 8 channels per row and sign-extend int8 -> int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;

        // One 64-bit load per output column (8 int8 weights each).
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0);
        vacc1x0 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0);
        vacc2x0 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0);
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x1 = _mm_maddd_epi16(vxa0, vxb1, vacc0x1);
        vacc1x1 = _mm_maddd_epi16(vxa1, vxb1, vacc1x1);
        vacc2x1 = _mm_maddd_epi16(vxa2, vxb1, vacc2x1);
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x2 = _mm_maddd_epi16(vxa0, vxb2, vacc0x2);
        vacc1x2 = _mm_maddd_epi16(vxa1, vxb2, vacc1x2);
        vacc2x2 = _mm_maddd_epi16(vxa2, vxb2, vacc2x2);
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x3 = _mm_maddd_epi16(vxa0, vxb3, vacc0x3);
        vacc1x3 = _mm_maddd_epi16(vxa1, vxb3, vacc1x3);
        vacc2x3 = _mm_maddd_epi16(vxa2, vxb3, vacc2x3);

        w = (const void*) ((const int8_t*) w + 32);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the four per-column accumulators of each row into a
    // single int32x4 vector (one lane per output column).
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

    // fp32 requantization: int32 -> float, scale, clamp above, back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

    // Add the output zero point with int16 saturation, then narrow to int8;
    // row 2 is duplicated to fill its int16 vector.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    // Lower clamp (the upper clamp was applied in float above).
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: row r lives in 32-bit lane r of vout.
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tile: store the remaining 2 and/or 1 column(s).
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,143
34.366337
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8-minmax-rndnu-neon-mlal-lane-prfm.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/neon-mlal-lane.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/igemm.h>
#include <xnnpack/prefetch.h>


// QS8 indirect GEMM (IGEMM) microkernel: MR=3 output rows x NR=8 output
// columns, NEON vmlal_lane_s16 multiply-accumulate with software prefetch
// (prfm variant). Accumulators are requantized with the rndnu scheme
// (saturating pre-shift, doubling-high multiply, rounding post-shift).
//
// Arguments follow the standard XNNPACK IGEMM contract: `a` is an
// indirection buffer of row-pointer triples (ks bytes per pass), `zero`
// marks padding taps that must not receive a_offset, `w` holds 8 int32
// biases followed by packed int8 weights, and `c` is written with
// cm_stride/cn_stride spacing.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8__neon_mlal_lane_prfm(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    // Row 1 not requested: alias onto row 0.
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    // Row 2 not requested: alias onto row 1.
    c2 = c1;
  }

  do {
    // Seed all three rows' accumulators from the 8 packed int32 biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next triple of row pointers from the indirection buffer;
      // the `zero` sentinel is used as-is (no a_offset).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      // Main loop: 8 input channels per iteration. Each channel kp reads one
      // row of 8 int8 weights and multiply-accumulates it against lane kp of
      // the widened activations of all 3 rows.
      size_t k = kc;
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int16x8_t vxa1 = vmovl_s8(va1);
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int16x8_t vxa2 = vmovl_s8(va2);

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
        const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
        // Prefetch weights well ahead of the current read position.
        xnn_prefetch_to_l1((const int8_t*) w + 448);
        const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
        const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
        const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
        const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1-7 channels. Activations are loaded once (XNN_OOB_READS
      // permits reading past the valid bytes) and only the first k channels'
      // weight rows are consumed, selected by the nested k tests below.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int16x8_t vxa1 = vmovl_s8(va1);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int16x8_t vxa2 = vmovl_s8(va2);

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);

        if (k >= 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

          vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
          vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
          vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
          vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);

          if (k > 2 * sizeof(int8_t)) {
            const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

            vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
            vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
            vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
            vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);

            if (k >= 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

              vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
              vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
              vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
              vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);

              if (k > 4 * sizeof(int8_t)) {
                const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

                vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
                vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
                vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
                vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);

                if (k >= 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c5 = vld1_s8(w);
                  w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

                  vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                  vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                  vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
                  vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);

                  if (k > 6 * sizeof(int8_t)) {
                    const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

                    vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                    vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                    vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                    vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                  }
                }
              }
            }
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Post-accumulation work: rndnu requantization — saturating pre-shift,
    // saturating doubling-high multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    // AArch64: narrow with vqmovn_high to avoid extra register moves.
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column tile: rows 0/1 are packed in one 16-byte vector,
      // row 2 in an 8-byte vector.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4/2/1 columns, shifting consumed bytes out of
      // the output vectors with vext after each store.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,114
55.822034
114
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8-minmax-rndnu-neon-mlal-lane.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/neon-mlal-lane.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/igemm.h>

// QS8 indirect GEMM (IGEMM) microkernel, MR=3 x NR=8, NEON VMLAL-lane variant
// with "rndnu" (rounding-to-nearest-up) requantization.
//
// Arguments (standard XNNPACK igemm contract):
//   mr        - number of output rows actually computed (1..3)
//   nc        - number of output columns remaining
//   kc        - number of input channels (reduction dimension), in bytes
//   ks        - size of the indirection buffer slice per output pixel, in bytes
//   a         - indirection buffer: ks/(3*sizeof(void*)) groups of 3 row pointers
//   w         - packed weights: per NR-group, 8 int32 biases followed by
//               kc (rounded) * 8 int8 weights
//   c         - output pointer, cm_stride between rows, cn_stride between
//               NR-column tiles
//   a_offset  - byte offset added to every non-`zero` input pointer
//   zero      - pointer to a zero vector used for out-of-image pixels
//   params    - rndnu_neon requantization parameters (shifts, multiplier,
//               zero point, output min/max)
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8__neon_mlal_lane(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up one output row pointer per MR row; when mr < 3 the extra rows
  // alias a lower row so the stores are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Initialize all three rows' accumulators from the packed bias values.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((const int32_t*) w + 4);
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next group of 3 row pointers from the indirection buffer.
      // The a_offset is only applied to real input rows, never to `zero`.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      // Main loop: process 8 input channels per iteration. Each of the 8
      // channels contributes one 8-wide weight vector (c0..c7), multiplied
      // by the matching lane of the widened activation vectors.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int16x8_t vxa1 = vmovl_s8(va1);
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int16x8_t vxa2 = vmovl_s8(va2);

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
        const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
        const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);

        // Channels 4..7 use the high halves of the widened activations.
        const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
        const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
        const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
        const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c7 = vmovl_s8(vb01234567c7);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa0), 3);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa1), 3);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c7), vget_high_s16(vxa2), 3);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c7), vget_high_s16(vxa2), 3);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1..7 leftover channels. The activation load backs up by
      // k bytes (XNN_OOB_READS tolerates the over-read); channels are then
      // consumed one at a time via the nested k-threshold tests below.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int16x8_t vxa0 = vmovl_s8(va0);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int16x8_t vxa1 = vmovl_s8(va1);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int16x8_t vxa2 = vmovl_s8(va2);

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
        const int16x8_t vxb01234567c0 = vmovl_s8(vb01234567c0);

        vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa0), 0);
        vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa1), 0);
        vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c0), vget_low_s16(vxa2), 0);
        vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c0), vget_low_s16(vxa2), 0);

        if (k >= 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
          const int16x8_t vxb01234567c1 = vmovl_s8(vb01234567c1);

          vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa0), 1);
          vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
          vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa1), 1);
          vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c1), vget_low_s16(vxa2), 1);
          vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c1), vget_low_s16(vxa2), 1);

          if (k > 2 * sizeof(int8_t)) {
            const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
            const int16x8_t vxb01234567c2 = vmovl_s8(vb01234567c2);

            vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa0), 2);
            vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
            vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa1), 2);
            vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c2), vget_low_s16(vxa2), 2);
            vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c2), vget_low_s16(vxa2), 2);

            if (k >= 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
              const int16x8_t vxb01234567c3 = vmovl_s8(vb01234567c3);

              vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa0), 3);
              vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
              vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa1), 3);
              vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c3), vget_low_s16(vxa2), 3);
              vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c3), vget_low_s16(vxa2), 3);

              if (k > 4 * sizeof(int8_t)) {
                const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                const int16x8_t vxb01234567c4 = vmovl_s8(vb01234567c4);

                vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa0), 0);
                vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
                vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa1), 0);
                vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c4), vget_high_s16(vxa2), 0);
                vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c4), vget_high_s16(vxa2), 0);

                if (k >= 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                  const int16x8_t vxb01234567c5 = vmovl_s8(vb01234567c5);

                  vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa0), 1);
                  vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                  vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa1), 1);
                  vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c5), vget_high_s16(vxa2), 1);
                  vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c5), vget_high_s16(vxa2), 1);

                  if (k > 6 * sizeof(int8_t)) {
                    const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((const int8_t*) w + 8);
                    const int16x8_t vxb01234567c6 = vmovl_s8(vb01234567c6);

                    vacc0x0123 = vmlal_lane_s16(vacc0x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc0x4567 = vmlal_lane_s16(vacc0x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa0), 2);
                    vacc1x0123 = vmlal_lane_s16(vacc1x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                    vacc1x4567 = vmlal_lane_s16(vacc1x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa1), 2);
                    vacc2x0123 = vmlal_lane_s16(vacc2x0123, vget_low_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                    vacc2x4567 = vmlal_lane_s16(vacc2x4567, vget_high_s16(vxb01234567c6), vget_high_s16(vxa2), 2);
                  }
                }
              }
            }
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Post-accumulation work
    // rndnu requantization: saturating pre-shift, doubling-multiply-high,
    // rounding post-shift, then add zero point and narrow with saturation.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    // Rows 0 and 1 are packed into one q-register for the store path.
    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column tile: store all rows, advance, and rewind the
      // indirection buffer for the next column tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 bytes per row, rotating the vectors between steps.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,026
55.894886
114
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8-minmax-rndnu-neon-mull-addw-dup.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/neon-mull-addw-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/gemm.h>

// QS8 indirect GEMM (IGEMM) microkernel, MR=3 x NR=8, NEON VMULL + VADDW
// variant with duplicated (vdup_lane) activations, "rndnu" requantization.
//
// Arguments follow the standard XNNPACK igemm contract:
//   mr/nc/kc  - tile rows, remaining output columns, reduction size in bytes
//   ks/a      - indirection-buffer slice size and pointer array (3 row
//               pointers per group); `zero` entries skip the a_offset
//   w         - packed weights: 8 int32 biases then 8 int8 weights per channel
//   c         - output, cm_stride between rows, cn_stride between tiles
//   params    - rndnu_neon requantization parameters
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8__neon_mull_addw_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointers; rows beyond mr alias a lower row (duplicate stores).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Load packed biases into the row-0 accumulators; rows 1-2 start equal.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Next group of 3 row pointers from the indirection buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      // Main loop: 8 channels per iteration. Each channel c0..c7 widens
      // vb * dup(activation lane) to int16 and widening-adds into int32.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
        const int16x8_t vprod1x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va1, 0));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c0));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c0));
        const int16x8_t vprod2x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va2, 0));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c0));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c0));
        const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
        const int16x8_t vprod1x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va1, 1));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c1));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c1));
        const int16x8_t vprod2x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va2, 1));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c1));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c1));
        const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
        const int16x8_t vprod1x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va1, 2));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c2));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c2));
        const int16x8_t vprod2x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va2, 2));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c2));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c2));
        const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
        const int16x8_t vprod1x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va1, 3));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c3));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c3));
        const int16x8_t vprod2x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va2, 3));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c3));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c3));
        const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
        const int16x8_t vprod1x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va1, 4));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c4));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c4));
        const int16x8_t vprod2x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va2, 4));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c4));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c4));
        const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
        const int16x8_t vprod1x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va1, 5));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c5));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c5));
        const int16x8_t vprod2x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va2, 5));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c5));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c5));
        const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
        const int16x8_t vprod1x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va1, 6));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c6));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c6));
        const int16x8_t vprod2x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va2, 6));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c6));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c6));
        const int8x8_t vb01234567c7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va0, 7));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c7));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c7));
        const int16x8_t vprod1x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va1, 7));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c7));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c7));
        const int16x8_t vprod2x01234567c7 = vmull_s8(vb01234567c7, vdup_lane_s8(va2, 7));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c7));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c7));

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1..7 leftover channels; activations back up by k bytes
      // (over-read tolerated via XNN_OOB_READS) and channels are consumed
      // one at a time through the nested k-threshold tests.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb01234567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int16x8_t vprod0x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va0, 0));
        vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c0));
        vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c0));
        const int16x8_t vprod1x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va1, 0));
        vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c0));
        vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c0));
        const int16x8_t vprod2x01234567c0 = vmull_s8(vb01234567c0, vdup_lane_s8(va2, 0));
        vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c0));
        vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c0));

        if (k >= 2 * sizeof(int8_t)) {
          const int8x8_t vb01234567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int16x8_t vprod0x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va0, 1));
          vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c1));
          vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c1));
          const int16x8_t vprod1x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va1, 1));
          vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c1));
          vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c1));
          const int16x8_t vprod2x01234567c1 = vmull_s8(vb01234567c1, vdup_lane_s8(va2, 1));
          vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c1));
          vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c1));

          if (k > 2 * sizeof(int8_t)) {
            const int8x8_t vb01234567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int16x8_t vprod0x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va0, 2));
            vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c2));
            vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c2));
            const int16x8_t vprod1x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va1, 2));
            vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c2));
            vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c2));
            const int16x8_t vprod2x01234567c2 = vmull_s8(vb01234567c2, vdup_lane_s8(va2, 2));
            vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c2));
            vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c2));

            if (k >= 4 * sizeof(int8_t)) {
              const int8x8_t vb01234567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

              const int16x8_t vprod0x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va0, 3));
              vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c3));
              vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c3));
              const int16x8_t vprod1x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va1, 3));
              vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c3));
              vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c3));
              const int16x8_t vprod2x01234567c3 = vmull_s8(vb01234567c3, vdup_lane_s8(va2, 3));
              vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c3));
              vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c3));

              if (k > 4 * sizeof(int8_t)) {
                const int8x8_t vb01234567c4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

                const int16x8_t vprod0x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va0, 4));
                vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c4));
                vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c4));
                const int16x8_t vprod1x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va1, 4));
                vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c4));
                vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c4));
                const int16x8_t vprod2x01234567c4 = vmull_s8(vb01234567c4, vdup_lane_s8(va2, 4));
                vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c4));
                vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c4));

                if (k >= 6 * sizeof(int8_t)) {
                  const int8x8_t vb01234567c5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

                  const int16x8_t vprod0x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va0, 5));
                  vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c5));
                  vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c5));
                  const int16x8_t vprod1x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va1, 5));
                  vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c5));
                  vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c5));
                  const int16x8_t vprod2x01234567c5 = vmull_s8(vb01234567c5, vdup_lane_s8(va2, 5));
                  vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c5));
                  vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c5));

                  if (k > 6 * sizeof(int8_t)) {
                    const int8x8_t vb01234567c6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

                    const int16x8_t vprod0x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va0, 6));
                    vacc0x0123 = vaddw_s16(vacc0x0123, vget_low_s16(vprod0x01234567c6));
                    vacc0x4567 = vaddw_s16(vacc0x4567, vget_high_s16(vprod0x01234567c6));
                    const int16x8_t vprod1x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va1, 6));
                    vacc1x0123 = vaddw_s16(vacc1x0123, vget_low_s16(vprod1x01234567c6));
                    vacc1x4567 = vaddw_s16(vacc1x4567, vget_high_s16(vprod1x01234567c6));
                    const int16x8_t vprod2x01234567c6 = vmull_s8(vb01234567c6, vdup_lane_s8(va2, 6));
                    vacc2x0123 = vaddw_s16(vacc2x0123, vget_low_s16(vprod2x01234567c6));
                    vacc2x4567 = vaddw_s16(vacc2x4567, vget_high_s16(vprod2x01234567c6));
                  }
                }
              }
            }
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, doubling-multiply-high,
    // rounding post-shift, then add zero point and narrow with saturation.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);

    // Rows 0 and 1 are packed into one q-register for the store path.
    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store, advance output pointers, rewind the
      // indirection buffer for the next column tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 bytes per row, rotating the vectors between steps.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,730
55.79726
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c16-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c16-neon-mlal.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel, 3x8 tile, NEON VMLAL variant.
//
// Computes up to 3 output rows (mr <= 3) by 8 output columns per outer
// iteration, accumulating int8 dot products over a channel dimension kc that
// is rounded up to a multiple of 16 bytes. Inputs are reached through the
// indirection buffer `a` (ks pointers per output pixel, 3 per step); entries
// equal to `zero` are used as-is (zero padding) and are NOT offset by
// a_offset. Results are requantized with the rndnu scheme visible below
// (saturating pre-shift, doubling-high multiply, rounding post-shift),
// offset by the output zero point, clamped to [output_min, output_max],
// and stored as int8.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c16__neon_mlal(
    size_t mr,                 // number of rows to compute, 1..3
    size_t nc,                 // number of output columns remaining
    size_t kc,                 // number of input channels (bytes per K step)
    size_t ks,                 // indirection entries per output, in bytes of void*
    const int8_t** restrict a, // indirection buffer of input row pointers
    const void* restrict w,    // packed weights: 8 int32 biases then int8 weights
    int8_t* restrict c,        // output pointer
    size_t cm_stride,          // byte stride between output rows
    size_t cn_stride,          // byte stride between 8-column output groups
    size_t a_offset,           // byte offset added to non-zero indirection entries
    const int8_t* zero,        // pointer to the zero-padding buffer
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The packed weights are laid out in groups of 16 channels; round kc up so
  // the K loop below can always consume full 16-byte chunks (OOB reads are
  // declared safe via XNN_OOB_READS).
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // row 1 aliases row 0 when mr == 1 (stores are redundant but safe)
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // row 2 aliases row 1 when mr <= 2
  }

  do {
    // Load the 8 per-column int32 biases into lane 0 of eight separate
    // accumulators; each vacc0xN holds partial sums for column N and is
    // horizontally reduced after the K loop.
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // Rows 1 and 2 start from the same biases as row 0.
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;

    size_t p = ks;
    do {
      // Fetch the next 3 input row pointers from the indirection buffer.
      // Pointers equal to `zero` reference the shared zero buffer and must
      // not be rebased by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      // KC loop of 16 with up to 15 remainder
      size_t k = kc;
      while (k != 0) {
        // 16 input channels per row, and 16 channels of weights for each of
        // the 8 output columns.
        const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
        const int8x16_t va1 = vld1q_s8(a1); a1 += 16;
        const int8x16_t va2 = vld1q_s8(a2); a2 += 16;

        const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

        // For each column N: widening multiply of the low 8 bytes, then a
        // widening multiply-accumulate of the high 8 bytes (vmlal_s8), and a
        // pairwise add-accumulate into the int32 accumulator (vpadalq_s16).
        int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
        int16x8_t vprod1x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va1));
        int16x8_t vprod2x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va2));
        vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
        vprod1x0 = vmlal_s8(vprod1x0, vget_high_s8(vb0), vget_high_s8(va1));
        vprod2x0 = vmlal_s8(vprod2x0, vget_high_s8(vb0), vget_high_s8(va2));
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
        int16x8_t vprod1x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va1));
        int16x8_t vprod2x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va2));
        vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
        vprod1x1 = vmlal_s8(vprod1x1, vget_high_s8(vb1), vget_high_s8(va1));
        vprod2x1 = vmlal_s8(vprod2x1, vget_high_s8(vb1), vget_high_s8(va2));
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
        int16x8_t vprod1x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va1));
        int16x8_t vprod2x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va2));
        vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
        vprod1x2 = vmlal_s8(vprod1x2, vget_high_s8(vb2), vget_high_s8(va1));
        vprod2x2 = vmlal_s8(vprod2x2, vget_high_s8(vb2), vget_high_s8(va2));
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
        int16x8_t vprod1x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va1));
        int16x8_t vprod2x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va2));
        vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
        vprod1x3 = vmlal_s8(vprod1x3, vget_high_s8(vb3), vget_high_s8(va1));
        vprod2x3 = vmlal_s8(vprod2x3, vget_high_s8(vb3), vget_high_s8(va2));
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
        int16x8_t vprod1x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va1));
        int16x8_t vprod2x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va2));
        vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
        vprod1x4 = vmlal_s8(vprod1x4, vget_high_s8(vb4), vget_high_s8(va1));
        vprod2x4 = vmlal_s8(vprod2x4, vget_high_s8(vb4), vget_high_s8(va2));
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
        int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
        int16x8_t vprod2x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va2));
        vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
        vprod1x5 = vmlal_s8(vprod1x5, vget_high_s8(vb5), vget_high_s8(va1));
        vprod2x5 = vmlal_s8(vprod2x5, vget_high_s8(vb5), vget_high_s8(va2));
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
        int16x8_t vprod1x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va1));
        int16x8_t vprod2x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va2));
        vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
        vprod1x6 = vmlal_s8(vprod1x6, vget_high_s8(vb6), vget_high_s8(va1));
        vprod2x6 = vmlal_s8(vprod2x6, vget_high_s8(vb6), vget_high_s8(va2));
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
        int16x8_t vprod1x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va1));
        int16x8_t vprod2x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va2));
        vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
        vprod1x7 = vmlal_s8(vprod1x7, vget_high_s8(vb7), vget_high_s8(va1));
        vprod2x7 = vmlal_s8(vprod2x7, vget_high_s8(vb7), vget_high_s8(va2));
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);

        k -= 16 * sizeof(int8_t);
      }

      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse the 8 per-column 4-lane accumulators of
    // each row into two int32x4 vectors (columns 0-3 and 4-7). AArch64 has
    // vpaddq_s32; the fallback uses vadd_s32/vpadd_s32 on 64-bit halves.
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
#else
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
#endif

    // rndnu requantization: saturating left pre-shift, saturating doubling
    // high multiply, then rounding right post-shift (shift amounts are
    // encoded as signed left-shift counts for vqshlq/vrshlq).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow to int16 with saturation, add the output zero point, then
    // narrow to int8 with saturation. Rows 0 and 1 are packed into one
    // int8x16 (row 0 low, row 1 high); row 2 stays in an int8x8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store for all 3 rows, then rewind the indirection
      // pointer by ks to reuse the same inputs for the next column group.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial store of the final 1-7 columns: store 4, 2, then 1 element,
      // rotating the vectors with vext after each store.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
18,192
52.351906
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2-minmax-rndnu-neon-mlal-ld4r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mlal_ld4r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { const int16x4x4_t va0x0 = 
vld4_dup_s16((const void*)a0); a0 += 8; const int16x4x4_t va0x1 = vld4_dup_s16((const void*)a0); a0 += 8; const int16x4x4_t va1x0 = vld4_dup_s16((const void*)a1); a1 += 8; const int16x4x4_t va1x1 = vld4_dup_s16((const void*)a1); a1 += 8; const int16x4x4_t va2x0 = vld4_dup_s16((const void*)a2); a2 += 8; const int16x4x4_t va2x1 = vld4_dup_s16((const void*)a2); a2 += 8; const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0x0 = vreinterpret_s8_s16(va0x0.val[0]); const int8x8_t va0c0x1 = vreinterpret_s8_s16(va0x1.val[0]); const int8x8_t va1c0x0 = vreinterpret_s8_s16(va1x0.val[0]); const int8x8_t va1c0x1 = vreinterpret_s8_s16(va1x1.val[0]); const int8x8_t va2c0x0 = vreinterpret_s8_s16(va2x0.val[0]); const int8x8_t va2c0x1 = vreinterpret_s8_s16(va2x1.val[0]); int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0c0x0); int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1c0x0); int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2c0x0); const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0c0x1); vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1c0x1); vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2c0x1); vacc0x0123 = vpadalq_s16(vacc0x0123, 
vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0c0x0); int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1c0x0); int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2c0x0); const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0c0x1); vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1c0x1); vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2c0x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va0c1x0 = vreinterpret_s8_s16(va0x0.val[1]); const int8x8_t va0c1x1 = vreinterpret_s8_s16(va0x1.val[1]); const int8x8_t va1c1x0 = vreinterpret_s8_s16(va1x0.val[1]); const int8x8_t va1c1x1 = vreinterpret_s8_s16(va1x1.val[1]); const int8x8_t va2c1x0 = vreinterpret_s8_s16(va2x0.val[1]); const int8x8_t va2c1x1 = vreinterpret_s8_s16(va2x1.val[1]); int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0c1x0); int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1c1x0); int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2c1x0); const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0c1x1); vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1c1x1); vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2c1x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0c1x0); int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1c1x0); int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2c1x0); const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x4567c1 = 
vmlal_s8(vprod0x4567c1, vb4567c1x1, va0c1x1); vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1c1x1); vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2c1x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va0c2x0 = vreinterpret_s8_s16(va0x0.val[2]); const int8x8_t va0c2x1 = vreinterpret_s8_s16(va0x1.val[2]); const int8x8_t va1c2x0 = vreinterpret_s8_s16(va1x0.val[2]); const int8x8_t va1c2x1 = vreinterpret_s8_s16(va1x1.val[2]); const int8x8_t va2c2x0 = vreinterpret_s8_s16(va2x0.val[2]); const int8x8_t va2c2x1 = vreinterpret_s8_s16(va2x1.val[2]); int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0c2x0); int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1c2x0); int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2c2x0); const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0c2x1); vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1c2x1); vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2c2x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0c2x0); int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1c2x0); int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2c2x0); const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0c2x1); vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1c2x1); vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2c2x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va0c3x0 = vreinterpret_s8_s16(va0x0.val[3]); const int8x8_t va0c3x1 = 
vreinterpret_s8_s16(va0x1.val[3]); const int8x8_t va1c3x0 = vreinterpret_s8_s16(va1x0.val[3]); const int8x8_t va1c3x1 = vreinterpret_s8_s16(va1x1.val[3]); const int8x8_t va2c3x0 = vreinterpret_s8_s16(va2x0.val[3]); const int8x8_t va2c3x1 = vreinterpret_s8_s16(va2x1.val[3]); int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0c3x0); int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1c3x0); int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2c3x0); const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0c3x1); vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1c3x1); vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2c3x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0c3x0); int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1c3x0); int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2c3x0); const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0c3x1); vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1c3x1); vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2c3x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); k -= 16 * sizeof(int8_t); } if (k >= 8 * sizeof(int8_t)) { const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8; const int16x4x4_t va1 = vld4_dup_s16((const void*)a1); a1 += 8; const int16x4x4_t va2 = vld4_dup_s16((const void*)a2); a2 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) 
((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]); const int8x8_t va1c0 = vreinterpret_s8_s16(va1.val[0]); const int8x8_t va2c0 = vreinterpret_s8_s16(va2.val[0]); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]); const int8x8_t va1c1 = vreinterpret_s8_s16(va1.val[1]); const int8x8_t va2c1 = vreinterpret_s8_s16(va2.val[1]); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 
= vmull_s8(vb4567c1, va2c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]); const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]); const int8x8_t va2c2 = vreinterpret_s8_s16(va2.val[2]); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]); const int8x8_t va1c3 = vreinterpret_s8_s16(va1.val[3]); const int8x8_t va2c3 = vreinterpret_s8_s16(va2.val[3]); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); 
const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, 
vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); } } } p -= 3 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, 
vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); 
vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); 
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
24,794
53.614537
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel for NEON:
//   - 3 output rows (MR = 3) x 8 output columns (NR = 8) per invocation
//   - K dimension processed in groups of 2 int8 values ("c2"): each pair of
//     activation bytes is broadcast with vdup_lane_s16 and multiplied against
//     pre-packed weights via vmull_s8, then pair-accumulated into int32 lanes
//     with vpadalq_s16
//   - "rndnu" requantization: saturating pre-shift, doubling-multiply-high,
//     rounding post-shift (params->rndnu_neon)
// `a` is an indirection buffer of ks/(3*sizeof(void*)) pointer triples; rows
// equal to `zero` are NOT offset by a_offset (they point at a shared zero
// buffer). XNN_OOB_READS: the kernel may over-read within padded buffers.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round kc up to the channel-group size (2 int8s) that the packed weights assume.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // fewer than 2 rows: alias row 1 onto row 0 (stores are redundant but safe)
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // fewer than 3 rows: alias row 2 onto row 1
  }

  do {
    // Packed weights start with 8 int32 biases (one per output column).
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next triple of row pointers from the indirection buffer;
      // only non-zero rows are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;

      // Main loop: 8 K-elements (4 groups of 2) per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Group c0: broadcast 16-bit lane 0 (activation bytes 0..1) of each row.
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));

        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        // Group c1: activation bytes 2..3.
        const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1))
;
        const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
        const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));

        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        // Group c2: activation bytes 4..5.
        const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
        const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
        const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));

        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        // Group c3: activation bytes 6..7.
        const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3));
        const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3));
        const int8x8_t va2c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3));

        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);

        k -= 8 * sizeof(int8_t);
      }

      // Remainder: 2, 4, or 6 K-elements left (kc was rounded up to 2).
      // Loads may over-read past k bytes; permitted under XNN_OOB_READS.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);

        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);

          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
          }
        }
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Requantization (rndnu): saturating pre-shift, saturating doubling
    // multiply-high, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add output zero point, then
    // narrow int16 -> int8 (saturating). AArch64 uses the *_high forms.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max))
;

    if (nc >= 8) {
      // Full 8-column store for all 3 rows (rows stored last-to-first).
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 columns, rotating the vectors between steps.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,583
49.871166
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2-minmax-rndnu-neon-mull-ld1r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld1r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int16x4_t va00 = 
vld1_dup_s16((const void*)a0); const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2)); const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4)); const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8; const int16x4_t va10 = vld1_dup_s16((const void*)a1); const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2)); const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4)); const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8; const int16x4_t va20 = vld1_dup_s16((const void*)a2); const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2)); const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4)); const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(va00); const int8x8_t va1c0 = vreinterpret_s8_s16(va10); const int8x8_t va2c0 = vreinterpret_s8_s16(va20); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, 
va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va0c1 = vreinterpret_s8_s16(va01); const int8x8_t va1c1 = vreinterpret_s8_s16(va11); const int8x8_t va2c1 = vreinterpret_s8_s16(va21); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(va02); const int8x8_t va1c2 = vreinterpret_s8_s16(va12); const int8x8_t va2c2 = vreinterpret_s8_s16(va22); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(va03); const int8x8_t va1c3 
= vreinterpret_s8_s16(va13); const int8x8_t va2c3 = vreinterpret_s8_s16(va23); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); 
vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); 
vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); } } } p -= 3 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = 
vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, 
vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,800
49.152239
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld2r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int16x4x2_t va00 = 
vld2_dup_s16((const void*)a0); const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8; const int16x4x2_t va10 = vld2_dup_s16((const void*)a1); const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8; const int16x4x2_t va20 = vld2_dup_s16((const void*)a2); const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]); const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]); const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va0c1 = 
vreinterpret_s8_s16(va00.val[1]); const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]); const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]); const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]); const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]); const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]); const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = 
vmull_s8(vb0123c3, va2c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w 
= (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = 
vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); } } } p -= 3 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = 
vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = 
vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,488
49.118541
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2-minmax-rndnu-neon-mull-ld4r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2__neon_mull_ld4r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int16x4x4_t va0 = 
vld4_dup_s16((const void*)a0); a0 += 8; const int16x4x4_t va1 = vld4_dup_s16((const void*)a1); a1 += 8; const int16x4x4_t va2 = vld4_dup_s16((const void*)a2); a2 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]); const int8x8_t va1c0 = vreinterpret_s8_s16(va1.val[0]); const int8x8_t va2c0 = vreinterpret_s8_s16(va2.val[0]); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]); const int8x8_t va1c1 = vreinterpret_s8_s16(va1.val[1]); const int8x8_t va2c1 = vreinterpret_s8_s16(va2.val[1]); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const 
int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]); const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]); const int8x8_t va2c2 = vreinterpret_s8_s16(va2.val[2]); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]); const int8x8_t va1c3 = vreinterpret_s8_s16(va1.val[3]); const int8x8_t va2c3 = vreinterpret_s8_s16(va2.val[3]); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); const int16x8_t vprod0x4567c3 = 
vmull_s8(vb4567c3, va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = 
vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = 
vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); } } } p -= 3 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), 
vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, 
vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,263
48.889571
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2s4-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel: 3 rows x 8 columns, "c2s4" layout
// (2 K-elements per step, activation register rotated by 2 bytes with vext_s8
// between the 4 K-groups instead of reloading), NEON, "rndnu" requantization.
// The "mlal" variant processes 16 K-elements per main-loop iteration, fusing
// two octets per product with vmull_s8 + vmlal_s8 before accumulating.
// Marked XNN_OOB_READS: loads are full 8-byte octets even near buffer ends;
// callers must guarantee over-reads are safe.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2s4__neon_mlal(
    size_t mr,                       // number of rows actually computed (<= 3)
    size_t nc,                       // number of output columns remaining
    size_t kc,                       // K (channels), rounded up to 8 below
    size_t ks,                       // indirection: 3 pointers per step, ks = steps * 3 * sizeof(void*)
    const int8_t** restrict a,       // indirection buffer of activation row pointers
    const void* restrict w,          // packed weights: 8 int32 biases then int8 weight octets
    int8_t* restrict c,              // output, cm_stride between rows, cn_stride per 8-column tile
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,                 // byte offset added to non-zero activation pointers
    const int8_t* zero,              // sentinel pointer: rows equal to it skip a_offset (zero padding)
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; rows beyond mr alias the previous row so stores are
  // harmless duplicates instead of out-of-bounds writes.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize all 3 rows' accumulators from the 8 packed int32 biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next triple of activation row pointers from the indirection
      // buffer; pointers equal to `zero` (padding) do not get a_offset applied.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;

      // Main loop: 16 K-elements per iteration, held as two octets (x0, x1)
      // per row. Each product pairs vmull_s8 on the x0 octet with vmlal_s8 on
      // the x1 octet; vext_s8 rotates the activation octets by 2 bytes to
      // advance to the next K-group (c0 -> c1 -> c2 -> c3).
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // K-group 0.
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1);
        vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1);
        vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1);
        vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1);
        vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        // Rotate activations by 2 bytes to line up K-group 1.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        // K-group 1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1);
        vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1);
        vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1);
        vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1);
        vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        // Rotate activations by 2 bytes to line up K-group 2.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        // K-group 2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1);
        vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1);
        vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1);
        vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1);
        vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        // Rotate activations by 2 bytes to line up K-group 3.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va0x1 = vext_s8(va0x1, va0x1, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va1x1 = vext_s8(va1x1, va1x1, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va2x1 = vext_s8(va2x1, va2x1, 2);
        // K-group 3.
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1);
        vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1);
        vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2x1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1);
        vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1);
        vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2x1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);

        k -= 16 * sizeof(int8_t);
      }
      // Remainder: exactly 8 K-elements left (kc rounded to a multiple of 8) —
      // same shuffle pattern with a single octet per row and vmull only.
      if (k != 0) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // K-group 0.
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // K-group 1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // K-group 2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // K-group 3.
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating left pre-shift, saturating doubling
    // high-half multiply, rounding right post-shift (shift amounts are
    // negative counts applied via vqshlq/vrshlq).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add output zero point, narrow to
    // int8 (saturating). AArch64 uses the *_high fused narrows.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column tile: store rows 2, 1, 0 and rewind the indirection
      // buffer to process the next 8 columns with the same activations.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Tail: store 4/2/1 columns, rotating the vectors with vext between
      // steps so the next store sees the remaining lanes at position 0.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
18,020
47.837398
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c2s4-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel producing a 3 (rows) x 8 (columns)
// output tile. Accumulates int8 x int8 products into int32 lanes and
// requantizes with the "rndnu" scheme (saturating pre-shift, doubling-high
// multiply, rounding post-shift). Each K iteration consumes 8 bytes per row,
// multiplying 2-byte column groups c0..c3 and rotating the activation vector
// by 2 bytes between groups (the "c2s4" shuffle). Uses vmull_s8 only.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c2s4__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; when mr < 3 the extra rows alias the previous row,
  // so their stores are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  // Round K up to a multiple of 8 bytes; XNN_OOB_READS marks the resulting
  // over-read of the activation rows as intentional.
  kc = round_up_po2(kc, 8 * sizeof(int8_t))
  ;
  do {
    // Initialize all three rows' accumulators from the packed bias at the
    // head of the weights blob.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Dereference the indirection buffer; the shared `zero` row is used
      // as-is, real rows are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;

        // Eight packed weight vectors: column groups c0..c3, each split into
        // output columns 0-3 and 4-7.
        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Column group c0: widening multiply, then pairwise add-accumulate
        // (vpadalq_s16 folds each pair of int16 products into one int32 lane).
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        // Rotate activations by 2 bytes to align the next column group.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // Column group c1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // Column group c2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        // Column group c3 (no rotation needed afterwards).
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Requantization (rndnu): saturating left pre-shift, saturating doubling
    // high multiply, rounding right post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add output zero point, then narrow
    // to int8. AArch64 uses the *_high forms to fuse the combine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column store; rewind the indirection buffer for the next tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4, 2, then 1 lanes as needed, rotating the
      // output vectors between stores.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,128
43.162698
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mlal-dup.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel producing a 3 (rows) x 8 (columns)
// output tile. Accumulates int8 x int8 products into int32 pairs (per two
// output columns) and requantizes with the "rndnu" scheme. K is processed in
// 4-byte column groups, broadcast to the whole vector with vdup_lane_s32
// ("dup" variant); the main loop double-pumps 16 bytes of K per iteration
// using vmull_s8 + vmlal_s8.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mlal_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round K up to a multiple of 4 bytes; XNN_OOB_READS marks the resulting
  // over-read of the activation rows as intentional.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Output row pointers; when mr < 3 the extra rows alias the previous row,
  // so their stores are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Load the packed bias; each 64-bit pair of int32 biases is widened into
    // a 128-bit accumulator covering two output columns.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;

    size_t p = ks;
    do {
      // Dereference the indirection buffer; the shared `zero` row is used
      // as-is, real rows are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      // Main loop: 16 bytes of K per iteration, vmull + vmlal double pump.
      while (k >= 16 * sizeof(int8_t)) {
        const int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        const int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        const int8x8_t va2x1 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Broadcast the first 4-byte group (lane 0) of each activation vector.
        const int8x8_t va0c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 0));
        const int8x8_t va0c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 0));
        const int8x8_t va1c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1x0), 0));
        const int8x8_t va1c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1x1), 0));
        const int8x8_t va2c0x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2x0), 0));
        const int8x8_t va2c0x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2x1), 0));

        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1c0x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2c0x0);
        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1c0x1);
        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2c0x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1c0x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2c0x0);
        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1c0x1);
        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2c0x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1c0x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2c0x0);
        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1c0x1);
        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2c0x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1c0x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2c0x0);
        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1c0x1);
        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2c0x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        // Broadcast the second 4-byte group (lane 1) and repeat.
        const int8x8_t va0c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x0), 1));
        const int8x8_t va0c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0x1), 1));
        const int8x8_t va1c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1x0), 1));
        const int8x8_t va1c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1x1), 1));
        const int8x8_t va2c1x0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2x0), 1));
        const int8x8_t va2c1x1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2x1), 1));

        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1c1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2c1x0);
        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1c1x1);
        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2c1x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1c1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2c1x0);
        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1c1x1);
        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2c1x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1c1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2c1x0);
        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1c1x1);
        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2c1x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1c1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2c1x0);
        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1c1x1);
        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2c1x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);

        k -= 16 * sizeof(int8_t);
      }
      // Single 8-byte step (vmull only, no double pump).
      if (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));

        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));

        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder (1..7 bytes): only column group c0 is consumed; pointers
      // advance by the true remainder k, relying on XNN_OOB_READS padding.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the per-column-pair accumulators into one int32 per
    // output column. AArch64 has a full-width pairwise add (vpaddq_s32).
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
#endif

    // Requantization (rndnu): saturating left pre-shift, saturating doubling
    // high multiply, rounding right post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add output zero point, then narrow
    // to int8. AArch64 uses the *_high forms to fuse the combine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column store; rewind the indirection buffer for the next tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4, 2, then 1 lanes as needed, rotating the
      // output vectors between stores.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
23,712
53.015945
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mlal-ld1r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mlal_ld1r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = vacc0x45; int32x4_t vacc2x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if 
XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { const int32x2_t va00x0 = vld1_dup_s32((const void*)a0); const int32x2_t va01x0 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8; const int32x2_t va00x1 = vld1_dup_s32((const void*)a0); const int32x2_t va01x1 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8; const int32x2_t va10x0 = vld1_dup_s32((const void*)a1); const int32x2_t va11x0 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8; const int32x2_t va10x1 = vld1_dup_s32((const void*)a1); const int32x2_t va11x1 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8; const int32x2_t va20x0 = vld1_dup_s32((const void*)a2); const int32x2_t va21x0 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8; const int32x2_t va20x1 = vld1_dup_s32((const void*)a2); const int32x2_t va21x1 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0x0 = vreinterpret_s8_s32(va00x0); const int8x8_t va0c0x1 = 
vreinterpret_s8_s32(va00x1); const int8x8_t va1c0x0 = vreinterpret_s8_s32(va10x0); const int8x8_t va1c0x1 = vreinterpret_s8_s32(va10x1); const int8x8_t va2c0x0 = vreinterpret_s8_s32(va20x0); const int8x8_t va2c0x1 = vreinterpret_s8_s32(va20x1); int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1c0x0); int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2c0x0); const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1); vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1c0x1); vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2c0x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1c0x0); int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2c0x0); const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1); vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1c0x1); vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2c0x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1c0x0); int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2c0x0); const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1); vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1c0x1); vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2c0x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0); int16x8_t 
vprod1x67c0 = vmull_s8(vb67c0x0, va1c0x0); int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2c0x0); const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1); vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1c0x1); vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2c0x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va0c1x0 = vreinterpret_s8_s32(va01x0); const int8x8_t va0c1x1 = vreinterpret_s8_s32(va01x1); const int8x8_t va1c1x0 = vreinterpret_s8_s32(va11x0); const int8x8_t va1c1x1 = vreinterpret_s8_s32(va11x1); const int8x8_t va2c1x0 = vreinterpret_s8_s32(va21x0); const int8x8_t va2c1x1 = vreinterpret_s8_s32(va21x1); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1c1x0); int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2c1x0); const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1); vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1c1x1); vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2c1x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1c1x0); int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2c1x0); const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1); vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1c1x1); vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2c1x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0); 
int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1c1x0); int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2c1x0); const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1); vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1c1x1); vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2c1x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1c1x0); int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2c1x0); const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1); vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1c1x1); vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2c1x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 16 * sizeof(int8_t); } if (k >= 8 * sizeof(int8_t)) { const int32x2_t va00 = vld1_dup_s32((const void*)a0); const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8; const int32x2_t va10 = vld1_dup_s32((const void*)a1); const int32x2_t va11 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8; const int32x2_t va20 = vld1_dup_s32((const void*)a2); const int32x2_t va21 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = 
(const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(va00); const int8x8_t va1c0 = vreinterpret_s8_s32(va10); const int8x8_t va2c0 = vreinterpret_s8_s32(va20); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(va01); const int8x8_t va1c1 = vreinterpret_s8_s32(va11); const int8x8_t va2c1 = vreinterpret_s8_s32(va21); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = 
vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1); const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); 
vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, 
vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, 
vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, 
vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
23,833
52.200893
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mlal-ld2r.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 (signed 8-bit quantized) indirect-GEMM micro-kernel for ARM NEON.
//
// Tile: MR=3 rows x NR=8 output columns.  "c4": K is processed 4 int8
// elements at a time per column group; each int32x4 accumulator holds two
// partial column sums that are pairwise-reduced at the end.  "mlal": the
// main loop unrolls K by 16 and fuses two 8-element steps with
// vmull_s8 + vmlal_s8.  "ld2r": activations are fetched with vld2_dup_s32,
// which de-interleaves two 4-byte K-groups into .val[0]/.val[1] broadcast
// lanes.  "rndnu": requantization is saturating pre-shift, saturating
// doubling high multiply, then rounding post-shift.
//
// Parameters:
//   mr        - number of live output rows (1..3)
//   nc        - number of output columns remaining
//   kc        - reduction (K) size in bytes; rounded up to a multiple of 4
//   ks        - total size of the indirection buffer step, a multiple of
//               3 * sizeof(void*)
//   a         - indirection buffer: ks/(3*sizeof(void*)) groups of 3 row
//               pointers
//   w         - packed weights: per 8-column group, 8 widened initial
//               accumulator values (presumably packed bias - confirm
//               against the packing code) followed by interleaved int8
//               weights
//   c         - output pointer, rows cm_stride bytes apart; column groups
//               advance by cn_stride
//   a_offset  - byte offset added to every non-`zero` row pointer
//   zero      - pointer to a zero vector; row pointers equal to it are NOT
//               offset (used for padding rows)
//   params    - rndnu requantization constants and output clamping bounds
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mlal_ld2r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in 4-byte groups; packing guarantees padding up to this.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Row output pointers; when mr < 3 the extra rows alias lower rows so the
  // same code path can run unconditionally (stores just overwrite).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Load initial accumulators for 8 columns.  Each pair of 32-bit values
    // is widened to 64-bit lanes and reinterpreted, so e.g. vacc0x01 holds
    // {acc0, 0, acc1, 0}; the interleaved products accumulated below are
    // collapsed back by the pairwise adds after the K loop.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    // Rows 1 and 2 start from the same initial values as row 0.
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;

    size_t p = ks;
    do {
      // Fetch the next group of 3 row pointers from the indirection buffer.
      // Pointers equal to `zero` are padding rows and are not offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;

      // Main loop: 16 bytes of K per iteration (two fused 8-byte steps,
      // suffixes x0/x1).  vld2_dup_s32 broadcasts two consecutive 4-byte
      // K-groups into .val[0] (columns suffix c0) and .val[1] (suffix c1).
      while (k >= 16 * sizeof(int8_t)) {
        const int32x2x2_t va0x0 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int32x2x2_t va0x1 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int32x2x2_t va1x0 = vld2_dup_s32((const void*)a1); a1 += 8;
        const int32x2x2_t va1x1 = vld2_dup_s32((const void*)a1); a1 += 8;
        const int32x2x2_t va2x0 = vld2_dup_s32((const void*)a2); a2 += 8;
        const int32x2x2_t va2x1 = vld2_dup_s32((const void*)a2); a2 += 8;

        // First-step weights for all 8 columns (c0 then c1 K-groups).
        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // c0 K-group of each activation row, for both unrolled steps.
        const int8x8_t va0c0x0 = vreinterpret_s8_s32(va0x0.val[0]);
        const int8x8_t va0c0x1 = vreinterpret_s8_s32(va0x1.val[0]);
        const int8x8_t va1c0x0 = vreinterpret_s8_s32(va1x0.val[0]);
        const int8x8_t va1c0x1 = vreinterpret_s8_s32(va1x1.val[0]);
        const int8x8_t va2c0x0 = vreinterpret_s8_s32(va2x0.val[0]);
        const int8x8_t va2c0x1 = vreinterpret_s8_s32(va2x1.val[0]);

        // For each column pair: multiply (vmull), fuse the second K-step
        // (vmlal) with the second-step weights loaded just-in-time, then
        // pairwise-accumulate int16 products into the int32 accumulators.
        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0c0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1c0x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2c0x0);
        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0c0x1);
        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1c0x1);
        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2c0x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0c0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1c0x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2c0x0);
        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0c0x1);
        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1c0x1);
        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2c0x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0c0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1c0x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2c0x0);
        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0c0x1);
        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1c0x1);
        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2c0x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0c0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1c0x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2c0x0);
        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0c0x1);
        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1c0x1);
        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2c0x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);

        // Second (c1) K-group of each activation row.
        const int8x8_t va0c1x0 = vreinterpret_s8_s32(va0x0.val[1]);
        const int8x8_t va0c1x1 = vreinterpret_s8_s32(va0x1.val[1]);
        const int8x8_t va1c1x0 = vreinterpret_s8_s32(va1x0.val[1]);
        const int8x8_t va1c1x1 = vreinterpret_s8_s32(va1x1.val[1]);
        const int8x8_t va2c1x0 = vreinterpret_s8_s32(va2x0.val[1]);
        const int8x8_t va2c1x1 = vreinterpret_s8_s32(va2x1.val[1]);

        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0c1x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1c1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2c1x0);
        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0c1x1);
        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1c1x1);
        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2c1x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0c1x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1c1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2c1x0);
        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0c1x1);
        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1c1x1);
        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2c1x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0c1x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1c1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2c1x0);
        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0c1x1);
        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1c1x1);
        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2c1x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0c1x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1c1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2c1x0);
        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0c1x1);
        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1c1x1);
        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2c1x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);

        k -= 16 * sizeof(int8_t);
      }

      // Single 8-byte step (mull only, no fused second step).
      if (k >= 8 * sizeof(int8_t)) {
        const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int32x2x2_t va1 = vld2_dup_s32((const void*)a1); a1 += 8;
        const int32x2x2_t va2 = vld2_dup_s32((const void*)a2); a2 += 8;

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
        const int8x8_t va1c0 = vreinterpret_s8_s32(va1.val[0]);
        const int8x8_t va2c0 = vreinterpret_s8_s32(va2.val[0]);

        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);

        const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
        const int8x8_t va1c1 = vreinterpret_s8_s32(va1.val[1]);
        const int8x8_t va2c1 = vreinterpret_s8_s32(va2.val[1]);

        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);

        k -= 8 * sizeof(int8_t);
      }

      // K remainder (< 8 bytes, i.e. exactly 4 after the rounding of kc):
      // only the first 4-byte group is used (lane 0 broadcast); the kernel
      // is declared XNN_OOB_READS, so vld1_s8 may read past the row end.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Pairwise-reduce the doubled accumulators back to one int32 per output
    // column: {c0+c1 partial sums} -> final per-column sums.
    #if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    #else
    // AArch32 has no vpaddq; emulate with 64-bit vpadd + vcombine.
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    #endif

    // rndnu requantization: saturating left pre-shift, saturating doubling
    // high multiply by the fixed-point multiplier, rounding right post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8 with saturation.  Rows 0 and 1 are packed
    // into one int8x16 (low = row 0, high = row 1); row 2 stays in int8x8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
    #if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
    #else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
    #endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column store for all 3 rows, highest row first; rewind the
      // indirection buffer for the next column group.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial-tile store: emit 4, then 2, then 1 byte(s) per row as the
      // bits of nc dictate, shifting consumed bytes out with vext.
      // Lane indices 2 (u32), 4 (u16), 8 (s8) address row 1 in the upper
      // half of the packed rows-0/1 vector.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
23,326
52.136674
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_dup( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = vacc0x45; int32x4_t vacc2x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if 
XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc0x23 = 
vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1)); const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1)); const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1)); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const int16x8_t 
vprod1x67c1 = vmull_s8(vb67c1, va1c1); const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t vprod2x01c0 = 
vmull_s8(vb01c0, va2c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), 
vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = 
vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) 
{ vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,157
48.716923
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mull-ld1r.c
// NOTE(review): extraction artifact — this block is an entire auto-generated C source file
// (qs8-igemm 3x8c4, NEON, rndnu requantization, ld1r variant) collapsed onto a few very long
// physical lines. Code tokens below are preserved byte-for-byte; only these review comments were
// added between the original line fragments. Because the first fragment starts with `//` file-header
// comments, everything on that same physical line is commented out as written — the original file
// clearly had newlines here; restore them before compiling. TODO confirm against the generator output.
// Fragment 1: file header, includes, function signature, argument asserts, kc rounded up to a
// multiple of 4 bytes, output row pointers c0..c2 (rows beyond mr alias the previous row), and bias
// loaded into paired int32 accumulators; rows 1 and 2 start from row 0's bias copy.
// Fragment 2: per-ks-iteration indirection-pointer setup for a0..a2 (the `zero` row skips a_offset),
// then the main k >= 8 loop. Unlike the dup variant, each 4-byte input group is broadcast at load
// time with vld1_dup_s32 (two dup-loads per row per iteration); weights are loaded and each group
// is widen-multiplied (vmull_s8) and pairwise-accumulated (vpadalq_s16) into the int32 accumulators.
// Fragment 3: main-loop multiply/accumulate continued — columns 23/45/67 of group c0 and all of
// group c1 (second dup-loaded input group).
// Fragment 4: tail of the c1 group, then the k != 0 remainder path: a final (possibly partial)
// 4-byte group per row, broadcast via vdup_lane_s32 here, processed row by row.
// Fragment 5: remainder path continued for row 2; end of the ks loop; then horizontal reduction of
// the paired accumulators — vpaddq_s32 on ARM64, vpadd_s32/vcombine_s32 otherwise.
// Fragment 6: non-ARM64 reduction continued, followed by rndnu requantization: saturating pre-shift
// (vqshlq_s32), saturating doubling multiply-high (vqdmulhq_s32), rounding post-shift (vrshlq_s32),
// then saturating narrow int32 -> int16 with the output zero point added.
// Fragment 7: int16 -> int8 narrowing (rows 0+1 packed into one int8x16, row 2 kept as int8x8),
// clamp to [output_min, output_max], full nc >= 8 store path (advance by cn_stride, rewind the
// indirection buffer by ks), and the start of the partial-store path (nc & 4 lane stores + vext).
// Fragment 8: partial-store path continued for nc & 2 and nc & 1; loop until nc == 0.
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld1r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = vacc0x45; int32x4_t vacc2x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if 
XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int32x2_t va00 = vld1_dup_s32((const void*)a0); const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8; const int32x2_t va10 = vld1_dup_s32((const void*)a1); const int32x2_t va11 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8; const int32x2_t va20 = vld1_dup_s32((const void*)a2); const int32x2_t va21 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(va00); const int8x8_t va1c0 = vreinterpret_s8_s32(va10); const int8x8_t va2c0 = vreinterpret_s8_s32(va20); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); 
const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(va01); const int8x8_t va1c1 = vreinterpret_s8_s32(va11); const int8x8_t va2c1 = vreinterpret_s8_s32(va21); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const 
int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1); const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t 
vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = 
vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t 
vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, 
vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,194
48.375
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4__neon_mull_ld2r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = vacc0x45; int32x4_t vacc2x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if 
XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8; const int32x2x2_t va1 = vld2_dup_s32((const void*)a1); a1 += 8; const int32x2x2_t va2 = vld2_dup_s32((const void*)a2); a2 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]); const int8x8_t va1c0 = vreinterpret_s8_s32(va1.val[0]); const int8x8_t va2c0 = vreinterpret_s8_s32(va2.val[0]); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); 
vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]); const int8x8_t va1c1 = vreinterpret_s8_s32(va1.val[1]); const int8x8_t va2c1 = vreinterpret_s8_s32(va2.val[1]); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1); const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1); vacc0x67 = vpadalq_s16(vacc0x67, 
vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); 
vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 
= vcombine_s32(vsum2x45, vsum2x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t 
vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, 
vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
16,029
48.323077
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4s2-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mlal( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2; int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = vacc0x45; int32x4_t vacc2x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = 
a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va0x1 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va1x1 = vld1_s8(a1); a1 += 8; int8x8_t va2x0 = vld1_s8(a2); a2 += 8; int8x8_t va2x1 = vld1_s8(a2); a2 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0); const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1); vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1); vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0); const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1); vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1); vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = 
vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0); const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1); vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1); vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0); const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1); vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1); vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va0x1 = vext_s8(va0x1, va0x1, 4); va1x0 = vext_s8(va1x0, va1x0, 4); va1x1 = vext_s8(va1x1, va1x1, 4); va2x0 = vext_s8(va2x0, va2x0, 4); va2x1 = vext_s8(va2x1, va2x1, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0); const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1); vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1); vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, 
va2x0); const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1); vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1); vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0); const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1); vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1); vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0); const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1); vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1); vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); k -= 16 * sizeof(int8_t); } if (k != 0) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va2x0 = vld1_s8(a2); a2 += 8; const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb45c1x0 = 
vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0); int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0); int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0); int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0); int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0); int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0); int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0); int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0); int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); va0x0 = vext_s8(va0x0, va0x0, 4); va1x0 = vext_s8(va1x0, va1x0, 4); va2x0 = vext_s8(va2x0, va2x0, 4); int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0); int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0); int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0); int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0); int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); int16x8_t vprod0x45c1 = 
vmull_s8(vb45c1x0, va0x0); int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0); int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0); int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0); int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 
= vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, 
voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, 
vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
18,823
47.893506
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c4s2-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect-GEMM micro-kernel producing a 3-row x 8-column output tile.
// "c4s2" layout: weights are consumed 4 bytes per channel in 2 shuffle stages
// (the 8-byte activation vector is rotated by 4 between stages via vext_s8).
// Uses NEON VMULL for int8 dot products and "rndnu" requantization
// (saturating pre-shift, doubling-high multiply, rounding post-shift).
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c4s2__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; when mr < 3 the extra rows alias the previous row,
  // so stores to them are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  // The K loop consumes 8 bytes per iteration; kc is padded accordingly.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Seed accumulators with the per-channel bias stored at the head of w
    // (2 int32 channels per 128-bit accumulator, loaded pairwise).
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;

    size_t p = ks;
    do {
      // Indirection: each of the 3 rows gets its next activation pointer;
      // the sentinel `zero` pointer is NOT offset (it points at a zero buffer).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;
      do {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;

        // 8 weight vectors: 4 column-pairs x 2 shuffle stages (c0, c1).
        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Stage c0: int8 widening multiply, pairwise-accumulate into int32.
        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        // Rotate activations by 4 bytes for the second shuffle stage.
        va0x0 = vext_s8(va0x0, va0x0, 4);
        va1x0 = vext_s8(va1x0, va1x0, 4);
        va2x0 = vext_s8(va2x0, va2x0, 4);
        // Stage c1: same pattern against the stage-1 weight vectors.
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: fold channel-pair accumulators into one int32
    // per output channel (AArch64 has vpaddq_s32; AArch32 pairs via vpadd_s32).
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
#endif

    // rndnu requantization: saturating pre-shift, doubling-high multiply,
    // rounding post-shift (parameters come from params->rndnu_neon).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add the output zero point,
    // then narrow int16 -> int8 (saturating). Rows 0 and 1 share one q-register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column store; rewind the indirection buffer for the next tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder store: 4-, 2-, then 1-byte pieces, shifting lanes down
      // after each partial store.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
12,851
44.9
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c8-minmax-fp32-avx2.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx8c8-avx2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect-GEMM micro-kernel producing a 3-row x 8-column output tile.
// "c8" layout: 8 bytes of K per channel per step, computed with AVX2
// _mm256_madd_epi16 after sign-extending int8 -> int16. Requantization is
// done in fp32 (scale multiply, clamp, convert-with-rounding back to int32).
void xnn_qs8_igemm_minmax_fp32_ukernel_3x8c8__avx2(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The K loop consumes 8 bytes per iteration; kc is padded accordingly.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Output row pointers; when mr < 3 the extra rows alias the previous row,
  // so stores to them are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }

  do {
    // Seed accumulators with per-channel bias: one int32 broadcast into each
    // 128-bit lane (each 256-bit accumulator covers 2 output channels).
    const __m128i vbias0x0 = _mm_cvtsi32_si128(((const int*) w)[0]);
    const __m128i vbias0x1 = _mm_cvtsi32_si128(((const int*) w)[1]);
    __m256i vacc0x01 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x0), vbias0x1, 1);
    const __m128i vbias0x2 = _mm_cvtsi32_si128(((const int*) w)[2]);
    const __m128i vbias0x3 = _mm_cvtsi32_si128(((const int*) w)[3]);
    __m256i vacc0x23 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x2), vbias0x3, 1);
    const __m128i vbias0x4 = _mm_cvtsi32_si128(((const int*) w)[4]);
    const __m128i vbias0x5 = _mm_cvtsi32_si128(((const int*) w)[5]);
    __m256i vacc0x45 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x4), vbias0x5, 1);
    const __m128i vbias0x6 = _mm_cvtsi32_si128(((const int*) w)[6]);
    const __m128i vbias0x7 = _mm_cvtsi32_si128(((const int*) w)[7]);
    __m256i vacc0x67 = _mm256_inserti128_si256(_mm256_castsi128_si256(vbias0x6), vbias0x7, 1);
    __m256i vacc1x01 = vacc0x01;
    __m256i vacc1x23 = vacc0x23;
    __m256i vacc1x45 = vacc0x45;
    __m256i vacc1x67 = vacc0x67;
    __m256i vacc2x01 = vacc0x01;
    __m256i vacc2x23 = vacc0x23;
    __m256i vacc2x45 = vacc0x45;
    __m256i vacc2x67 = vacc0x67;
    w = (const int32_t*) w + 8;

    size_t p = ks;
    do {
      // Indirection: each of the 3 rows gets its next activation pointer;
      // the sentinel `zero` pointer is NOT offset (it points at a zero buffer).
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = 0;
      while (k < kc) {
        // Broadcast 8 activation bytes per row into both lanes, widen to int16.
        const __m128i va0 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a0));
        const __m256i vxa0 = _mm256_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a1));
        const __m256i vxa1 = _mm256_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_broadcastq_epi64(_mm_loadl_epi64((const __m128i*) a2));
        const __m256i vxa2 = _mm256_cvtepi8_epi16(va2);
        a2 += 8;

        // madd(int16, int16) yields 4 int32 partial sums per channel lane.
        const __m128i vb01 = _mm_load_si128((const __m128i*) w);
        const __m256i vxb01 = _mm256_cvtepi8_epi16(vb01);

        vacc0x01 = _mm256_add_epi32(vacc0x01, _mm256_madd_epi16(vxa0, vxb01));
        vacc1x01 = _mm256_add_epi32(vacc1x01, _mm256_madd_epi16(vxa1, vxb01));
        vacc2x01 = _mm256_add_epi32(vacc2x01, _mm256_madd_epi16(vxa2, vxb01));
        const __m128i vb23 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m256i vxb23 = _mm256_cvtepi8_epi16(vb23);

        vacc0x23 = _mm256_add_epi32(vacc0x23, _mm256_madd_epi16(vxa0, vxb23));
        vacc1x23 = _mm256_add_epi32(vacc1x23, _mm256_madd_epi16(vxa1, vxb23));
        vacc2x23 = _mm256_add_epi32(vacc2x23, _mm256_madd_epi16(vxa2, vxb23));
        const __m128i vb45 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 32));
        const __m256i vxb45 = _mm256_cvtepi8_epi16(vb45);

        vacc0x45 = _mm256_add_epi32(vacc0x45, _mm256_madd_epi16(vxa0, vxb45));
        vacc1x45 = _mm256_add_epi32(vacc1x45, _mm256_madd_epi16(vxa1, vxb45));
        vacc2x45 = _mm256_add_epi32(vacc2x45, _mm256_madd_epi16(vxa2, vxb45));
        const __m128i vb67 = _mm_load_si128((const __m128i*) ((const int8_t*) w + 48));
        const __m256i vxb67 = _mm256_cvtepi8_epi16(vb67);

        vacc0x67 = _mm256_add_epi32(vacc0x67, _mm256_madd_epi16(vxa0, vxb67));
        vacc1x67 = _mm256_add_epi32(vacc1x67, _mm256_madd_epi16(vxa1, vxb67));
        vacc2x67 = _mm256_add_epi32(vacc2x67, _mm256_madd_epi16(vxa2, vxb67));

        w = (const void*) ((const int8_t*) w + 64);
        k += 8 * sizeof(int8_t);
      }
      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: two hadd rounds interleave channels as
    // 0 2 4 6 1 3 5 7; the permute restores natural channel order.
    const __m256i vacc0x0213 = _mm256_hadd_epi32(vacc0x01, vacc0x23);
    const __m256i vacc0x4657 = _mm256_hadd_epi32(vacc0x45, vacc0x67);
    const __m256i vacc1x0213 = _mm256_hadd_epi32(vacc1x01, vacc1x23);
    const __m256i vacc1x4657 = _mm256_hadd_epi32(vacc1x45, vacc1x67);
    const __m256i vacc2x0213 = _mm256_hadd_epi32(vacc2x01, vacc2x23);
    const __m256i vacc2x4657 = _mm256_hadd_epi32(vacc2x45, vacc2x67);

    const __m256i vacc0x02461357 = _mm256_hadd_epi32(vacc0x0213, vacc0x4657);
    const __m256i vacc1x02461357 = _mm256_hadd_epi32(vacc1x0213, vacc1x4657);
    const __m256i vacc2x02461357 = _mm256_hadd_epi32(vacc2x0213, vacc2x4657);

    const __m256i vpermute_mask = _mm256_set_epi32(7, 3, 6, 2, 5, 1, 4, 0);
    __m256i vacc0x01234567 = _mm256_permutevar8x32_epi32(vacc0x02461357, vpermute_mask);
    __m256i vacc1x01234567 = _mm256_permutevar8x32_epi32(vacc1x02461357, vpermute_mask);
    __m256i vacc2x01234567 = _mm256_permutevar8x32_epi32(vacc2x02461357, vpermute_mask);

    // fp32 requantization: scale, clamp the top end before converting
    // (cvtps rounds to nearest even), then pack with saturation.
    __m256 vscaled0x01234567 = _mm256_cvtepi32_ps(vacc0x01234567);
    __m256 vscaled1x01234567 = _mm256_cvtepi32_ps(vacc1x01234567);
    __m256 vscaled2x01234567 = _mm256_cvtepi32_ps(vacc2x01234567);

    const __m256 vscale = _mm256_load_ps(params->fp32_avx2.scale);
    vscaled0x01234567 = _mm256_mul_ps(vscaled0x01234567, vscale);
    vscaled1x01234567 = _mm256_mul_ps(vscaled1x01234567, vscale);
    vscaled2x01234567 = _mm256_mul_ps(vscaled2x01234567, vscale);

    const __m256 voutput_max_less_zero_point = _mm256_load_ps(params->fp32_avx2.output_max_less_zero_point);
    vscaled0x01234567 = _mm256_min_ps(vscaled0x01234567, voutput_max_less_zero_point);
    vscaled1x01234567 = _mm256_min_ps(vscaled1x01234567, voutput_max_less_zero_point);
    vscaled2x01234567 = _mm256_min_ps(vscaled2x01234567, voutput_max_less_zero_point);

    vacc0x01234567 = _mm256_cvtps_epi32(vscaled0x01234567);
    vacc1x01234567 = _mm256_cvtps_epi32(vscaled1x01234567);
    vacc2x01234567 = _mm256_cvtps_epi32(vscaled2x01234567);

    const __m256i voutput_zero_point = _mm256_load_si256((const __m256i*) params->fp32_avx2.output_zero_point);
    __m256i vacc01x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc0x01234567, vacc1x01234567), voutput_zero_point);
    __m256i vacc22x01234567 = _mm256_adds_epi16(_mm256_packs_epi32(vacc2x01234567, vacc2x01234567), voutput_zero_point);

    // packs works per 128-bit lane; the permutes fix the lane interleave.
    vacc01x01234567 = _mm256_permute4x64_epi64(vacc01x01234567, _MM_SHUFFLE(3, 1, 2, 0));
    vacc22x01234567 = _mm256_permute4x64_epi64(vacc22x01234567, _MM_SHUFFLE(3, 1, 2, 0));

    __m256i vout = _mm256_packs_epi16(vacc01x01234567, vacc22x01234567);

    vout = _mm256_max_epi8(vout, _mm256_load_si256((const __m256i*) params->fp32_avx2.output_min));

    // vout layout: lo = [row0 | row2], hi = [row1 | row2].
    __m128i vout_lo = _mm256_castsi256_si128(vout);
    __m128i vout_hi = _mm256_extracti128_si256(vout, 1);

    if (nc >= 8) {
      // Full 8-column store; rewind the indirection buffer for the next tile.
      _mm_storeh_pi((__m64*) c2, _mm_castsi128_ps(vout_lo));
      _mm_storel_epi64((__m128i*) c1, vout_hi);
      _mm_storel_epi64((__m128i*) c0, vout_lo);

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder store: 4-, 2-, then 1-byte pieces, shifting the packed
      // bytes down after each partial store.
      if (nc & 4) {
        unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout_lo, 2));
        _mm_storeu_si32(c1, vout_hi);
        _mm_storeu_si32(c0, vout_lo);

        c2 += 4;
        c1 += 4;
        c0 += 4;

        vout_lo = _mm_srli_epi64(vout_lo, 32);
        vout_hi = _mm_srli_epi64(vout_hi, 32);
      }
      if (nc & 2) {
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout_lo, 4));
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout_hi, 0));
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout_lo, 0));

        c2 += 2;
        c1 += 2;
        c0 += 2;

        vout_lo = _mm_srli_epi32(vout_lo, 16);
        vout_hi = _mm_srli_epi32(vout_hi, 16);
      }
      if (nc & 1) {
        *c2 = (int8_t) _mm_extract_epi8(vout_lo, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout_hi, 0);
        *c0 = (int8_t) _mm_extract_epi8(vout_lo, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
9,471
39.652361
120
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c8-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c8-neon-mull.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mlal( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 3); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (3 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } do { int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc0x7 = vld1q_lane_s32(w, 
vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t)); int32x4_t vacc1x0 = vacc0x0; int32x4_t vacc1x1 = vacc0x1; int32x4_t vacc1x2 = vacc0x2; int32x4_t vacc1x3 = vacc0x3; int32x4_t vacc1x4 = vacc0x4; int32x4_t vacc1x5 = vacc0x5; int32x4_t vacc1x6 = vacc0x6; int32x4_t vacc1x7 = vacc0x7; int32x4_t vacc2x0 = vacc0x0; int32x4_t vacc2x1 = vacc0x1; int32x4_t vacc2x2 = vacc0x2; int32x4_t vacc2x3 = vacc0x3; int32x4_t vacc2x4 = vacc0x4; int32x4_t vacc2x5 = vacc0x5; int32x4_t vacc2x6 = vacc0x6; int32x4_t vacc2x7 = vacc0x7; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } a += 3; size_t k = kc; // 2x partial unrolled loop to load 16 bytes at a time using MLA. 
while (k >= 16 * sizeof(int8_t)) { const int8x8_t va0x0 = vld1_s8(a0); a0 += 8; const int8x8_t va0x1 = vld1_s8(a0); a0 += 8; const int8x8_t va1x0 = vld1_s8(a1); a1 += 8; const int8x8_t va1x1 = vld1_s8(a1); a1 += 8; const int8x8_t va2x0 = vld1_s8(a2); a2 += 8; const int8x8_t va2x1 = vld1_s8(a2); a2 += 8; const int8x8_t vb0x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb1x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb2x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb3x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb4x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb5x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb6x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb7x0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); const int8x8_t vb0x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x0 = vmull_s8(vb0x0, va0x0); int16x8_t vprod1x0 = vmull_s8(vb0x0, va1x0); int16x8_t vprod2x0 = vmull_s8(vb0x0, va2x0); vprod0x0 = vmlal_s8(vprod0x0, vb0x1, va0x1); vprod1x0 = vmlal_s8(vprod1x0, vb0x1, va1x1); vprod2x0 = vmlal_s8(vprod2x0, vb0x1, va2x1); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0); const int8x8_t vb1x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x1 = vmull_s8(vb1x0, va0x0); int16x8_t vprod1x1 = vmull_s8(vb1x0, va1x0); int16x8_t vprod2x1 = vmull_s8(vb1x0, va2x0); vprod0x1 = vmlal_s8(vprod0x1, vb1x1, va0x1); vprod1x1 = vmlal_s8(vprod1x1, vb1x1, va1x1); vprod2x1 = vmlal_s8(vprod2x1, vb1x1, va2x1); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); vacc2x1 = 
vpadalq_s16(vacc2x1, vprod2x1); const int8x8_t vb2x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x2 = vmull_s8(vb2x0, va0x0); int16x8_t vprod1x2 = vmull_s8(vb2x0, va1x0); int16x8_t vprod2x2 = vmull_s8(vb2x0, va2x0); vprod0x2 = vmlal_s8(vprod0x2, vb2x1, va0x1); vprod1x2 = vmlal_s8(vprod1x2, vb2x1, va1x1); vprod2x2 = vmlal_s8(vprod2x2, vb2x1, va2x1); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2); const int8x8_t vb3x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x3 = vmull_s8(vb3x0, va0x0); int16x8_t vprod1x3 = vmull_s8(vb3x0, va1x0); int16x8_t vprod2x3 = vmull_s8(vb3x0, va2x0); vprod0x3 = vmlal_s8(vprod0x3, vb3x1, va0x1); vprod1x3 = vmlal_s8(vprod1x3, vb3x1, va1x1); vprod2x3 = vmlal_s8(vprod2x3, vb3x1, va2x1); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3); const int8x8_t vb4x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x4 = vmull_s8(vb4x0, va0x0); int16x8_t vprod1x4 = vmull_s8(vb4x0, va1x0); int16x8_t vprod2x4 = vmull_s8(vb4x0, va2x0); vprod0x4 = vmlal_s8(vprod0x4, vb4x1, va0x1); vprod1x4 = vmlal_s8(vprod1x4, vb4x1, va1x1); vprod2x4 = vmlal_s8(vprod2x4, vb4x1, va2x1); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4); const int8x8_t vb5x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x5 = vmull_s8(vb5x0, va0x0); int16x8_t vprod1x5 = vmull_s8(vb5x0, va1x0); int16x8_t vprod2x5 = vmull_s8(vb5x0, va2x0); vprod0x5 = vmlal_s8(vprod0x5, vb5x1, va0x1); vprod1x5 = vmlal_s8(vprod1x5, vb5x1, va1x1); vprod2x5 = vmlal_s8(vprod2x5, vb5x1, va2x1); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5); const int8x8_t 
vb6x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x6 = vmull_s8(vb6x0, va0x0); int16x8_t vprod1x6 = vmull_s8(vb6x0, va1x0); int16x8_t vprod2x6 = vmull_s8(vb6x0, va2x0); vprod0x6 = vmlal_s8(vprod0x6, vb6x1, va0x1); vprod1x6 = vmlal_s8(vprod1x6, vb6x1, va1x1); vprod2x6 = vmlal_s8(vprod2x6, vb6x1, va2x1); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6); const int8x8_t vb7x1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof( int8_t)); int16x8_t vprod0x7 = vmull_s8(vb7x0, va0x0); int16x8_t vprod1x7 = vmull_s8(vb7x0, va1x0); int16x8_t vprod2x7 = vmull_s8(vb7x0, va2x0); vprod0x7 = vmlal_s8(vprod0x7, vb7x1, va0x1); vprod1x7 = vmlal_s8(vprod1x7, vb7x1, va1x1); vprod2x7 = vmlal_s8(vprod2x7, vb7x1, va2x1); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7); k -= 16 * sizeof(int8_t); } // Handle 8 bytes at a time using MUL. 
if (k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x0 = vmull_s8(vb0, va0); const int16x8_t vprod1x0 = vmull_s8(vb0, va1); const int16x8_t vprod2x0 = vmull_s8(vb0, va2); vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0); vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0); vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0); const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x1 = vmull_s8(vb1, va0); const int16x8_t vprod1x1 = vmull_s8(vb1, va1); const int16x8_t vprod2x1 = vmull_s8(vb1, va2); vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1); vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1); vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1); const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x2 = vmull_s8(vb2, va0); const int16x8_t vprod1x2 = vmull_s8(vb2, va1); const int16x8_t vprod2x2 = vmull_s8(vb2, va2); vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2); vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2); vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2); const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x3 = vmull_s8(vb3, va0); const int16x8_t vprod1x3 = vmull_s8(vb3, va1); const int16x8_t vprod2x3 = vmull_s8(vb3, va2); vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3); vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3); vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3); const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x4 = vmull_s8(vb4, va0); const int16x8_t vprod1x4 = vmull_s8(vb4, va1); const int16x8_t vprod2x4 = vmull_s8(vb4, va2); vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4); vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4); vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4); const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * 
sizeof(int8_t)); const int16x8_t vprod0x5 = vmull_s8(vb5, va0); const int16x8_t vprod1x5 = vmull_s8(vb5, va1); const int16x8_t vprod2x5 = vmull_s8(vb5, va2); vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5); vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5); vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5); const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x6 = vmull_s8(vb6, va0); const int16x8_t vprod1x6 = vmull_s8(vb6, va1); const int16x8_t vprod2x6 = vmull_s8(vb6, va2); vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6); vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6); vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6); const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int16x8_t vprod0x7 = vmull_s8(vb7, va0); const int16x8_t vprod1x7 = vmull_s8(vb7, va1); const int16x8_t vprod2x7 = vmull_s8(vb7, va2); vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7); vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7); vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7); k -= 8 * sizeof(int8_t); } p -= 3 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1); const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3); const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5); const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7); const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1); const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3); const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5); const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7); const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1); const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3); const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5); const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7); int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23); int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67); int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23); int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67); int32x4_t 
vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23); int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67); #else const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0)); const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1)); const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2)); const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3)); const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1); const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 ); const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4)); const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5)); const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6)); const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7)); const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5); const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 ); const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0)); const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1)); const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2)); const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3)); const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1); const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 ); const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4)); const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5)); const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6)); const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7)); const int32x2_t vsum1x45 = 
vpadd_s32(vpsum1x4, vpsum1x5); const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 ); const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0)); const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1)); const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2)); const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3)); const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1); const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3); int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 ); const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4)); const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5)); const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6)); const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7)); const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5); const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 ); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = 
vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min)); const int8x16_t 
voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max)); if (nc >= 8) { vst1_s8(c2 + 0, vout2x01234567); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1_lane_s8(c2, vout2x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
21,849
50.051402
114
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-3x8c8-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


// QS8 indirect-GEMM micro-kernel: MR=3 rows x NR=8 columns, consuming 8 bytes
// of K per step ("c8") with the NEON 8-bit widening multiply (vmull_s8) and
// rndnu (rounding-to-nearest-up) requantization.
void xnn_qs8_igemm_minmax_rndnu_ukernel_3x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (3 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is processed 8 bytes at a time; the packed weights are padded accordingly.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // row 1 aliases row 0 when fewer than 2 rows are valid
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;  // row 2 aliases row 1 when fewer than 3 rows are valid
  }

  do {
    // Load the 8 per-column int32 biases, one per accumulator (lane 0 of each
    // vector); the remaining lanes accumulate partial products and are summed
    // pairwise after the K loop.
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;

    size_t p = ks;
    do {
      // Resolve the indirection pointers; entries equal to `zero` point at the
      // shared zero buffer and must NOT be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      a += 3;

      size_t k = kc;

      // Handle 8 bytes at a time using MUL.
      while (k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;

        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);

        k -= 8 * sizeof(int8_t);
      }

      p -= 3 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse each per-column 4-lane accumulator into a
    // single lane, gathering columns 0-3 and 4-7 into one vector per row.
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
#else
    // AArch32 lacks vpaddq_s32; emulate the pairwise reduction with 64-bit ops.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
#endif

    // rndnu requantization: saturating pre-shift, doubling high-half multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);

    // Narrow to int16 with saturation, add the output zero point, then narrow
    // to int8 with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x8_t vout2x01234567 = vqmovn_s16(vacc2x01234567);
#endif

    // Clamp to the caller-specified [output_min, output_max] range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567 = vmax_s8(vout2x01234567, vget_low_s8(voutput_min));

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567 = vmin_s8(vout2x01234567, vget_low_s8(voutput_max));

    if (nc >= 8) {
      // Full 8-column tile: store rows highest-first, advance column pointers,
      // and rewind the indirection buffer for the next tile.
      vst1_s8(c2 + 0, vout2x01234567);
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: write 4/2/1-byte pieces, shifting stored lanes out of
      // the output vectors between steps.
      if (nc & 4) {
        vst1_lane_u32((void*) c2, vreinterpret_u32_s8(vout2x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1_lane_u16((void*) c2, vreinterpret_u16_s8(vout2x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567 = vext_s8(vout2x01234567, vout2x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1_lane_s8(c2, vout2x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,016
47.981651
114
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x16c2s4-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c2s4__neon_mull( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc1x89AB = vacc0x89AB; int32x4_t vacc1xCDEF = vacc0xCDEF; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc2x89AB = vacc0x89AB; int32x4_t vacc2xCDEF = vacc0xCDEF; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; int32x4_t vacc3x89AB = vacc0x89AB; int32x4_t vacc3xCDEF = vacc0xCDEF; size_t p = ks; do { const 
int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va2x0 = vld1_s8(a2); a2 += 8; int8x8_t va3x0 = vld1_s8(a3); a3 += 8; const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb89ABc0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vbCDEFc0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb89ABc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vbCDEFc1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb89ABc2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vbCDEFc2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb89ABc3x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vbCDEFc3x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0); int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0); int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0); int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, 
vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0); int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0); int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0); int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); int16x8_t vprod0x89ABc0 = vmull_s8(vb89ABc0x0, va0x0); int16x8_t vprod1x89ABc0 = vmull_s8(vb89ABc0x0, va1x0); int16x8_t vprod2x89ABc0 = vmull_s8(vb89ABc0x0, va2x0); int16x8_t vprod3x89ABc0 = vmull_s8(vb89ABc0x0, va3x0); vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc0); vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc0); vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc0); vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc0); int16x8_t vprod0xCDEFc0 = vmull_s8(vbCDEFc0x0, va0x0); int16x8_t vprod1xCDEFc0 = vmull_s8(vbCDEFc0x0, va1x0); int16x8_t vprod2xCDEFc0 = vmull_s8(vbCDEFc0x0, va2x0); int16x8_t vprod3xCDEFc0 = vmull_s8(vbCDEFc0x0, va3x0); vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc0); vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc0); vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc0); vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc0); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0); int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0); int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0); int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); vacc3x0123 = 
vpadalq_s16(vacc3x0123, vprod3x0123c1); int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0); int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0); int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0); int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); int16x8_t vprod0x89ABc1 = vmull_s8(vb89ABc1x0, va0x0); int16x8_t vprod1x89ABc1 = vmull_s8(vb89ABc1x0, va1x0); int16x8_t vprod2x89ABc1 = vmull_s8(vb89ABc1x0, va2x0); int16x8_t vprod3x89ABc1 = vmull_s8(vb89ABc1x0, va3x0); vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc1); vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc1); vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc1); vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc1); int16x8_t vprod0xCDEFc1 = vmull_s8(vbCDEFc1x0, va0x0); int16x8_t vprod1xCDEFc1 = vmull_s8(vbCDEFc1x0, va1x0); int16x8_t vprod2xCDEFc1 = vmull_s8(vbCDEFc1x0, va2x0); int16x8_t vprod3xCDEFc1 = vmull_s8(vbCDEFc1x0, va3x0); vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc1); vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc1); vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc1); vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc1); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0); int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0); int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0); int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0); int16x8_t vprod1x4567c2 = 
vmull_s8(vb4567c2x0, va1x0); int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0); int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); int16x8_t vprod0x89ABc2 = vmull_s8(vb89ABc2x0, va0x0); int16x8_t vprod1x89ABc2 = vmull_s8(vb89ABc2x0, va1x0); int16x8_t vprod2x89ABc2 = vmull_s8(vb89ABc2x0, va2x0); int16x8_t vprod3x89ABc2 = vmull_s8(vb89ABc2x0, va3x0); vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc2); vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc2); vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc2); vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc2); int16x8_t vprod0xCDEFc2 = vmull_s8(vbCDEFc2x0, va0x0); int16x8_t vprod1xCDEFc2 = vmull_s8(vbCDEFc2x0, va1x0); int16x8_t vprod2xCDEFc2 = vmull_s8(vbCDEFc2x0, va2x0); int16x8_t vprod3xCDEFc2 = vmull_s8(vbCDEFc2x0, va3x0); vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc2); vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc2); vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc2); vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc2); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0); int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0); int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0); int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3); int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0); int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0); int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0); int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, 
va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3); int16x8_t vprod0x89ABc3 = vmull_s8(vb89ABc3x0, va0x0); int16x8_t vprod1x89ABc3 = vmull_s8(vb89ABc3x0, va1x0); int16x8_t vprod2x89ABc3 = vmull_s8(vb89ABc3x0, va2x0); int16x8_t vprod3x89ABc3 = vmull_s8(vb89ABc3x0, va3x0); vacc0x89AB = vpadalq_s16(vacc0x89AB, vprod0x89ABc3); vacc1x89AB = vpadalq_s16(vacc1x89AB, vprod1x89ABc3); vacc2x89AB = vpadalq_s16(vacc2x89AB, vprod2x89ABc3); vacc3x89AB = vpadalq_s16(vacc3x89AB, vprod3x89ABc3); int16x8_t vprod0xCDEFc3 = vmull_s8(vbCDEFc3x0, va0x0); int16x8_t vprod1xCDEFc3 = vmull_s8(vbCDEFc3x0, va1x0); int16x8_t vprod2xCDEFc3 = vmull_s8(vbCDEFc3x0, va2x0); int16x8_t vprod3xCDEFc3 = vmull_s8(vbCDEFc3x0, va3x0); vacc0xCDEF = vpadalq_s16(vacc0xCDEF, vprod0xCDEFc3); vacc1xCDEF = vpadalq_s16(vacc1xCDEF, vprod1xCDEFc3); vacc2xCDEF = vpadalq_s16(vacc2xCDEF, vprod2xCDEFc3); vacc3xCDEF = vpadalq_s16(vacc3xCDEF, vprod3xCDEFc3); k -= 8 * sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc0x89AB = vqshlq_s32(vacc0x89AB, vright_pre_shift); vacc0xCDEF = vqshlq_s32(vacc0xCDEF, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc1x89AB = vqshlq_s32(vacc1x89AB, vright_pre_shift); vacc1xCDEF = vqshlq_s32(vacc1xCDEF, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc2x89AB = 
vqshlq_s32(vacc2x89AB, vright_pre_shift); vacc2xCDEF = vqshlq_s32(vacc2xCDEF, vright_pre_shift); vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift); vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift); vacc3x89AB = vqshlq_s32(vacc3x89AB, vright_pre_shift); vacc3xCDEF = vqshlq_s32(vacc3xCDEF, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier); vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier); vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier); vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier); vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier); vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier); vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier); vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift); vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift); vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift); vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift); vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift); vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift); vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift); vacc3xCDEF 
= vrshlq_s32(vacc3xCDEF, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc0x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc1x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc2x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); int16x8_t vacc3x89ABCDEF = vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); vacc3x89ABCDEF = vqaddq_s16(vacc3x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF); int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF); int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF); int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc0x89ABCDEF = vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc1x89ABCDEF = 
vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc2x89ABCDEF = vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); int16x8_t vacc3x89ABCDEF = vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc0x89ABCDEF = vqaddq_s16(vacc0x89ABCDEF, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc1x89ABCDEF = vqaddq_s16(vacc1x89ABCDEF, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc2x89ABCDEF = vqaddq_s16(vacc2x89ABCDEF, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); vacc3x89ABCDEF = vqaddq_s16(vacc3x89ABCDEF, voutput_zero_point); int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF)); int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF)); int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF)); int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min); vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min); vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min); vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max); vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max); vout2x0123456789ABCDEF = 
vminq_s8(vout2x0123456789ABCDEF, voutput_max); vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max); if (nc >= 16) { vst1q_s8(c3 + 0, vout3x0123456789ABCDEF); vst1q_s8(c2 + 0, vout2x0123456789ABCDEF); vst1q_s8(c1 + 0, vout1x0123456789ABCDEF); vst1q_s8(c0 + 0, vout0x0123456789ABCDEF); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 16; } else { int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF)); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF)); if (nc & 8) { vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8; vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8; vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8; vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8; vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF)); vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF)); } if (nc & 4) { vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c2, 
vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
22,950
51.76092
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x16c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel: up to 4 output rows x 16 output
// columns, accumulating 4 int8 elements per dot product via the Armv8.2
// SDOT instruction (vdotq_lane_s32), with "rndnu" fixed-point requantization.
//
// Parameters follow the XNNPACK igemm ukernel contract:
//   mr/nc/kc/ks  - rows, columns, reduction length (bytes), indirection size
//   a            - indirection buffer of input-row pointers (ks entries/row group)
//   w            - packed weights: 16 int32 biases followed by int8 weights
//   c, cm_stride, cn_stride - output pointer and row/column-group strides
//   a_offset     - byte offset added to every non-`zero` indirection pointer
//   zero         - pointer to the zero buffer (rows that must not be offset)
//   params       - rndnu_neon requantization parameters
// XNN_OOB_READS: the kernel may read past the end of `a` rows (see remainder
// handling below); callers must guarantee those reads are safe.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x16c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round the reduction length up to the 4-byte SDOT granule.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Row output pointers; when mr < 4 the extra rows alias the previous row so
  // the kernel can unconditionally compute 4 rows.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize row 0 accumulators from the packed int32 biases, then
    // replicate into rows 1-3.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x89AB = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0xCDEF = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc1x89AB = vacc0x89AB;
    int32x4_t vacc1xCDEF = vacc0xCDEF;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc2x89AB = vacc0x89AB;
    int32x4_t vacc2xCDEF = vacc0xCDEF;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc3x89AB = vacc0x89AB;
    int32x4_t vacc3xCDEF = vacc0xCDEF;

    size_t p = ks;
    do {
      // Resolve the next group of 4 indirection pointers. Pointers equal to
      // `zero` reference the shared zero buffer and must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // Inner accumulation loop along the 16 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 4x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;

        // Load a 8x16 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 4x8 * 8x16 --> 4x16.
        // Lane 0 covers activation bytes 0-3, lane 1 covers bytes 4-7.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb4567x89AB, va0x01234567, 1);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb4567xCDEF, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb4567x89AB, va1x01234567, 1);
        vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb4567xCDEF, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb4567x89AB, va2x01234567, 1);
        vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb4567xCDEF, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
        vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb4567x89AB, va3x01234567, 1);
        vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb4567xCDEF, va3x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 4x4 block of activations.
        // NOTE: full 8-byte loads with <= 4 valid bytes; only lane 0 of the
        // result is consumed below (this is why the kernel is XNN_OOB_READS).
        const int8x8_t va0x01234567 = vld1_s8(a0);
        const int8x8_t va1x01234567 = vld1_s8(a1);
        const int8x8_t va2x01234567 = vld1_s8(a2);
        const int8x8_t va3x01234567 = vld1_s8(a3);

        // Load a 4x16 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x89AB = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123xCDEF = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 4x4 * 4x16 --> 4x16.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc0x89AB = vdotq_lane_s32(vacc0x89AB, vb0123x89AB, va0x01234567, 0);
        vacc0xCDEF = vdotq_lane_s32(vacc0xCDEF, vb0123xCDEF, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc1x89AB = vdotq_lane_s32(vacc1x89AB, vb0123x89AB, va1x01234567, 0);
        vacc1xCDEF = vdotq_lane_s32(vacc1xCDEF, vb0123xCDEF, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc2x89AB = vdotq_lane_s32(vacc2x89AB, vb0123x89AB, va2x01234567, 0);
        vacc2xCDEF = vdotq_lane_s32(vacc2xCDEF, vb0123xCDEF, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc3x89AB = vdotq_lane_s32(vacc3x89AB, vb0123x89AB, va3x01234567, 0);
        vacc3xCDEF = vdotq_lane_s32(vacc3xCDEF, vb0123xCDEF, va3x01234567, 0);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: pre-shift, saturating-doubling-multiply by the
    // fixed-point multiplier, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc0x89AB = vshlq_s32(vacc0x89AB, vright_pre_shift);
    vacc0xCDEF = vshlq_s32(vacc0xCDEF, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc1x89AB = vshlq_s32(vacc1x89AB, vright_pre_shift);
    vacc1xCDEF = vshlq_s32(vacc1xCDEF, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc2x89AB = vshlq_s32(vacc2x89AB, vright_pre_shift);
    vacc2xCDEF = vshlq_s32(vacc2xCDEF, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc3x89AB = vshlq_s32(vacc3x89AB, vright_pre_shift);
    vacc3xCDEF = vshlq_s32(vacc3xCDEF, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc0x89AB = vqdmulhq_s32(vacc0x89AB, vmultiplier);
    vacc0xCDEF = vqdmulhq_s32(vacc0xCDEF, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc1x89AB = vqdmulhq_s32(vacc1x89AB, vmultiplier);
    vacc1xCDEF = vqdmulhq_s32(vacc1xCDEF, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc2x89AB = vqdmulhq_s32(vacc2x89AB, vmultiplier);
    vacc2xCDEF = vqdmulhq_s32(vacc2xCDEF, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc3x89AB = vqdmulhq_s32(vacc3x89AB, vmultiplier);
    vacc3xCDEF = vqdmulhq_s32(vacc3xCDEF, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc0x89AB = vrshlq_s32(vacc0x89AB, vright_post_shift);
    vacc0xCDEF = vrshlq_s32(vacc0xCDEF, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc1x89AB = vrshlq_s32(vacc1x89AB, vright_post_shift);
    vacc1xCDEF = vrshlq_s32(vacc1xCDEF, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc2x89AB = vrshlq_s32(vacc2x89AB, vright_post_shift);
    vacc2xCDEF = vrshlq_s32(vacc2xCDEF, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc3x89AB = vrshlq_s32(vacc3x89AB, vright_post_shift);
    vacc3xCDEF = vrshlq_s32(vacc3xCDEF, vright_post_shift);

    // Saturating narrow int32 -> int16 (adding the output zero point), then
    // int16 -> int8. AArch64 uses the *_high narrowing forms.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x89AB), vacc0xCDEF), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x89AB), vacc1xCDEF), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x89AB), vacc2xCDEF), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x89AB), vacc3xCDEF), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc0x89ABCDEF);
    int8x16_t vout1x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc1x01234567), vacc1x89ABCDEF);
    int8x16_t vout2x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc2x89ABCDEF);
    int8x16_t vout3x0123456789ABCDEF = vqmovn_high_s16(vqmovn_s16(vacc3x01234567), vacc3x89ABCDEF);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc0x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x89AB), vqmovn_s32(vacc0xCDEF)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc1x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x89AB), vqmovn_s32(vacc1xCDEF)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc2x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x89AB), vqmovn_s32(vacc2xCDEF)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc3x89ABCDEF = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x89AB), vqmovn_s32(vacc3xCDEF)), voutput_zero_point);

    int8x16_t vout0x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc0x89ABCDEF));
    int8x16_t vout1x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc1x01234567), vqmovn_s16(vacc1x89ABCDEF));
    int8x16_t vout2x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc2x89ABCDEF));
    int8x16_t vout3x0123456789ABCDEF = vcombine_s8(vqmovn_s16(vacc3x01234567), vqmovn_s16(vacc3x89ABCDEF));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout3x0123456789ABCDEF = vmaxq_s8(vout3x0123456789ABCDEF, voutput_min);
    vout2x0123456789ABCDEF = vmaxq_s8(vout2x0123456789ABCDEF, voutput_min);
    vout1x0123456789ABCDEF = vmaxq_s8(vout1x0123456789ABCDEF, voutput_min);
    vout0x0123456789ABCDEF = vmaxq_s8(vout0x0123456789ABCDEF, voutput_min);

    vout3x0123456789ABCDEF = vminq_s8(vout3x0123456789ABCDEF, voutput_max);
    vout2x0123456789ABCDEF = vminq_s8(vout2x0123456789ABCDEF, voutput_max);
    vout1x0123456789ABCDEF = vminq_s8(vout1x0123456789ABCDEF, voutput_max);
    vout0x0123456789ABCDEF = vminq_s8(vout0x0123456789ABCDEF, voutput_max);

    if (nc >= 16) {
      // Full 16-column store, highest row first so aliased rows keep the
      // lowest valid row's values.
      vst1q_s8(c3 + 0, vout3x0123456789ABCDEF);
      vst1q_s8(c2 + 0, vout2x0123456789ABCDEF);
      vst1q_s8(c1 + 0, vout1x0123456789ABCDEF);
      vst1q_s8(c0 + 0, vout0x0123456789ABCDEF);

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 16;
    } else {
      // Partial store: pack row pairs into single registers and peel off
      // 8/4/2/1 columns via lane stores and vextq shifts.
      int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vget_low_s8(vout2x0123456789ABCDEF), vget_low_s8(vout3x0123456789ABCDEF));
      int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vget_low_s8(vout0x0123456789ABCDEF), vget_low_s8(vout1x0123456789ABCDEF));
      if (nc & 8) {
        vst1_s8(c3, vget_high_s8(vout2x01234567_3x01234567)); c3 += 8;
        vst1_s8(c2, vget_low_s8(vout2x01234567_3x01234567)); c2 += 8;
        vst1_s8(c1, vget_high_s8(vout0x01234567_1x01234567)); c1 += 8;
        vst1_s8(c0, vget_low_s8(vout0x01234567_1x01234567)); c0 += 8;
        vout2x01234567_3x01234567 = vcombine_s8(vget_high_s8(vout2x0123456789ABCDEF), vget_high_s8(vout3x0123456789ABCDEF));
        vout0x01234567_1x01234567 = vcombine_s8(vget_high_s8(vout0x0123456789ABCDEF), vget_high_s8(vout1x0123456789ABCDEF));
      }
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
18,272
53.222552
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x2-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_fmagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; assert(a3 != NULL); if 
XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale = params->fp32_scalar_fmagic.scale; vfpacc0x0 *= vscale; vfpacc0x1 *= vscale; vfpacc1x0 *= vscale; vfpacc1x1 *= vscale; vfpacc2x0 *= vscale; vfpacc2x1 *= vscale; vfpacc3x0 *= vscale; vfpacc3x1 *= vscale; const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point); vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point); vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point); vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point); vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point); vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point); vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point); vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point); const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point); vfpacc0x1 = math_min_f32(vfpacc0x1, 
voutput_max_less_zero_point); vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point); vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point); vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point); vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point); vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point); vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point); const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point; int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point; int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point; int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point; int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point; int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point; int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point; int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point; if XNN_LIKELY(nc >= 2) { c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) 
((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = (int8_t) vout3x0; c2[0] = (int8_t) vout2x0; c1[0] = (int8_t) vout1x0; c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
7,000
33.487685
116
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x2-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/math.h> #include <xnnpack/gemm.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_imagic( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0); int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1); int32_t vacc1x0 = vacc0x0; int32_t vacc1x1 = vacc0x1; int32_t vacc2x0 = vacc0x0; int32_t vacc2x1 = vacc0x1; int32_t vacc3x0 = vacc0x0; int32_t vacc3x1 = vacc0x1; w = (const void*) ((const int32_t*) w + 2); size_t p = ks; do { const int8_t* restrict a0 = a[0]; assert(a0 != NULL); if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; assert(a1 != NULL); if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; assert(a2 != NULL); if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; assert(a3 != NULL); if 
XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const int32_t va0 = (int32_t) *a0++; const int32_t va1 = (int32_t) *a1++; const int32_t va2 = (int32_t) *a2++; const int32_t va3 = (int32_t) *a3++; const int32_t vb0 = (int32_t) ((const int8_t*) w)[0]; const int32_t vb1 = (int32_t) ((const int8_t*) w)[1]; w = (const void*) ((const int8_t*) w + 2); vacc0x0 += va0 * vb0; vacc0x1 += va0 * vb1; vacc1x0 += va1 * vb0; vacc1x1 += va1 * vb1; vacc2x0 += va2 * vb0; vacc2x1 += va2 * vb1; vacc3x0 += va3 * vb0; vacc3x1 += va3 * vb1; k -= sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); float vfpacc0x0 = (float) vacc0x0; float vfpacc0x1 = (float) vacc0x1; float vfpacc1x0 = (float) vacc1x0; float vfpacc1x1 = (float) vacc1x1; float vfpacc2x0 = (float) vacc2x0; float vfpacc2x1 = (float) vacc2x1; float vfpacc3x0 = (float) vacc3x0; float vfpacc3x1 = (float) vacc3x1; const float vscale = params->fp32_scalar_imagic.scale; vfpacc0x0 *= vscale; vfpacc0x1 *= vscale; vfpacc1x0 *= vscale; vfpacc1x1 *= vscale; vfpacc2x0 *= vscale; vfpacc2x1 *= vscale; vfpacc3x0 *= vscale; vfpacc3x1 *= vscale; const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; vfpacc0x0 += vmagic_bias; vfpacc0x1 += vmagic_bias; vfpacc1x0 += vmagic_bias; vfpacc1x1 += vmagic_bias; vfpacc2x0 += vmagic_bias; vfpacc2x1 += vmagic_bias; vfpacc3x0 += vmagic_bias; vfpacc3x1 += vmagic_bias; int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0); int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1); int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0); int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1); int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0); int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1); int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0); int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1); const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; vout0x0 = math_max_s32(vout0x0, 
vmagic_min); vout0x1 = math_max_s32(vout0x1, vmagic_min); vout1x0 = math_max_s32(vout1x0, vmagic_min); vout1x1 = math_max_s32(vout1x1, vmagic_min); vout2x0 = math_max_s32(vout2x0, vmagic_min); vout2x1 = math_max_s32(vout2x1, vmagic_min); vout3x0 = math_max_s32(vout3x0, vmagic_min); vout3x1 = math_max_s32(vout3x1, vmagic_min); const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; vout0x0 = math_min_s32(vout0x0, vmagic_max); vout0x1 = math_min_s32(vout0x1, vmagic_max); vout1x0 = math_min_s32(vout1x0, vmagic_max); vout1x1 = math_min_s32(vout1x1, vmagic_max); vout2x0 = math_min_s32(vout2x0, vmagic_max); vout2x1 = math_min_s32(vout2x1, vmagic_max); vout3x0 = math_min_s32(vout3x0, vmagic_max); vout3x1 = math_min_s32(vout3x1, vmagic_max); const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; vout0x0 -= vmagic_bias_less_zero_point; vout0x1 -= vmagic_bias_less_zero_point; vout1x0 -= vmagic_bias_less_zero_point; vout1x1 -= vmagic_bias_less_zero_point; vout2x0 -= vmagic_bias_less_zero_point; vout2x1 -= vmagic_bias_less_zero_point; vout3x0 -= vmagic_bias_less_zero_point; vout3x1 -= vmagic_bias_less_zero_point; if XNN_LIKELY(nc >= 2) { c3[0] = (int8_t) vout3x0; c3[1] = (int8_t) vout3x1; c2[0] = (int8_t) vout2x0; c2[1] = (int8_t) vout2x1; c1[0] = (int8_t) vout1x0; c1[1] = (int8_t) vout1x1; c0[0] = (int8_t) vout0x0; c0[1] = (int8_t) vout0x1; c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 2; } else { if (nc & 1) { c3[0] = (int8_t) vout3x0; c2[0] = (int8_t) vout2x0; c1[0] = (int8_t) vout1x0; c0[0] = (int8_t) vout0x0; } nc = 0; } } while (nc != 0); }
6,643
30.339623
102
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x2-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>

// QS8 IGEMM (indirect GEMM) microkernel: 4-row x 2-column output tile,
// portable scalar implementation. Accumulates in int32, then requantizes via
// float scaling, clamping, and lrintf round-to-nearest-even
// ("fp32_scalar_lrintf" variant).
//
// Arguments:
//   mr        - rows of the output tile actually in use (1..4)
//   nc        - output columns remaining
//   kc        - reduction (channel) extent, in bytes of int8
//   ks        - bytes of indirection pointers consumed per column pass
//   a         - indirection buffer: 4 input-row pointers per step
//   w         - packed weights: 2 int32 biases, then interleaved int8 weights
//   c         - output pointer (row 0)
//   cm_stride - byte stride between output rows
//   cn_stride - byte stride to the next 2-column output group
//   a_offset  - byte offset applied to every non-padding input pointer
//   zero      - sentinel pointer identifying padding rows (never offset)
//   params    - requantization parameters (scale, clamps, zero point)
void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row pointers for the output tile. When mr < 4, unused trailing rows alias
  // the previous row so their stores stay in bounds (results are discarded).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 rows of accumulators from the per-column int32 biases.
    // The packed-weight stream may be unaligned, hence the unaligned loads.
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // Fetch the next 4 input-row pointers from the indirection buffer.
      // Pointers equal to `zero` reference the shared padding buffer and
      // must NOT be shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // One int8 element per row against 2 weight columns: 8 MACs/iteration.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization: int32 accumulators -> float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;

    // Apply the combined input*weight/output scale.
    const float vscale = params->fp32_scalar_lrintf.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;

    // Clamp (in the zero-point-relative domain) before rounding.
    const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);

    // Round to nearest (even) with lrintf; values are already clamped so the
    // conversion to int32 cannot overflow.
    const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
    const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
    const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
    const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
    const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
    const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
    const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
    const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);

    // Re-center onto the quantized output zero point.
    const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
    int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
    int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
    int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
    int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
    int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
    int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
    int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store for all 4 rows.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 2;
    } else {
      // Remainder: a single trailing column.
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,821
32.605911
100
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x2-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>

// QS8 IGEMM (indirect GEMM) microkernel: 4x2 output tile, WebAssembly scalar
// variant. Requantizes with the "fmagic" trick: float clamping via
// __builtin_wasm_min/max_f32, then a magic-bias addition so the rounded
// integer can be recovered from the float's bit pattern (no explicit
// float->int conversion instruction needed).
void xnn_qs8_igemm_minmax_fp32_ukernel_4x2__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; rows beyond mr alias the previous row so their
  // stores stay in bounds (and are discarded by the caller's layout).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed all rows' accumulators with the 2 per-column int32 biases
    // (potentially unaligned in the packed-weight stream).
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // Load the next 4 row pointers; `zero` marks padding rows and is not
      // adjusted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // 4 rows x 2 columns = 8 multiply-accumulates per element.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization: widen accumulators to float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;

    const float vscale = params->fp32_scalar_fmagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;

    // Clamp with WebAssembly's native f32.max/f32.min operations.
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);

    // "fmagic": adding the magic bias shifts the rounded integer into the
    // low mantissa bits of the float representation.
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;

    // Reinterpret the float bits and subtract the (bias - zero point) constant
    // to obtain the final quantized value.
    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 2) {
      // Full-width store of the 4x2 tile.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 2;
    } else {
      // Single remaining output column.
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
7,158
34.26601
116
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x2-minmax-rndnu-scalar.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>
#include <xnnpack/unaligned.h>

// QS8 IGEMM (indirect GEMM) microkernel: 4x2 output tile, portable scalar
// code. Requantization is pure fixed-point ("rndnu" = round-to-nearest-up):
// a widening 32x32->64-bit multiply by the quantized multiplier, a rounding
// addend, and an arithmetic right shift.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; rows past mr alias the previous row so out-of-tile
  // stores remain in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize accumulators from the 2 per-column int32 biases in the
    // (possibly unaligned) packed-weight stream.
    int32_t vacc0x0 = unaligned_indexed_load_s32(w, 0);
    int32_t vacc0x1 = unaligned_indexed_load_s32(w, 1);
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    w = (const void*) ((const int32_t*) w + 2);

    size_t p = ks;
    do {
      // Pull the next 4 input-row pointers from the indirection buffer;
      // `zero` designates padding rows and is never offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // 8 multiply-accumulates (4 rows x 2 columns) per input element.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        w = (const void*) ((const int8_t*) w + 2);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Fixed-point requantization: widening multiply plus rounding constant.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
    const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
    const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
    const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
    const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
    const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
    const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
    const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;

    // Arithmetic right shift of the 64-bit products completes the rescale.
    const uint32_t vshift = params->rndnu_scalar.shift;
    int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
    int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
    int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
    int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
    int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
    int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
    int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
    int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);

    // Clamp in the zero-point-relative domain.
    const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
    vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
    vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
    vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
    vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
    vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
    vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
    vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
    vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);

    const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
    vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
    vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
    vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
    vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
    vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
    vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
    vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
    vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);

    // Re-center onto the quantized output zero point.
    const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
    vout0x0 += voutput_zero_point;
    vout0x1 += voutput_zero_point;
    vout1x0 += voutput_zero_point;
    vout1x1 += voutput_zero_point;
    vout2x0 += voutput_zero_point;
    vout2x1 += voutput_zero_point;
    vout3x0 += voutput_zero_point;
    vout3x1 += voutput_zero_point;

    if XNN_LIKELY(nc >= 2) {
      // Full 2-column store for all 4 rows.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 2;
    } else {
      // Remainder: one trailing column.
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
6,873
34.251282
96
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

// QS8 IGEMM (indirect GEMM) microkernel: 4x4 output tile, portable scalar
// code. Requantizes with the "fmagic" scheme: float scale, float clamp, then
// a magic-bias addition so the rounded integer can be read back from the
// float's bit pattern.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; rows past mr alias the previous row so their
  // stores stay within the caller's buffer.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed all 16 accumulators (4 rows x 4 columns) from the per-column
    // int32 biases at the head of the packed-weight stream.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Load the next 4 input-row pointers from the indirection buffer;
      // `zero` marks padding rows and is never shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // 4 rows x 4 columns = 16 multiply-accumulates per input element.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc1x2 += va1 * vb2;
        vacc1x3 += va1 * vb3;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc2x2 += va2 * vb2;
        vacc2x3 += va2 * vb3;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;
        vacc3x2 += va3 * vb2;
        vacc3x3 += va3 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization: widen the int32 accumulators to float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc1x2 = (float) vacc1x2;
    float vfpacc1x3 = (float) vacc1x3;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc2x2 = (float) vacc2x2;
    float vfpacc2x3 = (float) vacc2x3;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    float vfpacc3x2 = (float) vacc3x2;
    float vfpacc3x3 = (float) vacc3x3;

    const float vscale = params->fp32_scalar_fmagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc1x2 *= vscale;
    vfpacc1x3 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc2x2 *= vscale;
    vfpacc2x3 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    vfpacc3x2 *= vscale;
    vfpacc3x3 *= vscale;

    // Clamp in the zero-point-relative float domain.
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
    vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
    vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
    vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
    vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
    vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
    vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
    vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
    vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
    vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
    vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
    vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
    vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
    vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
    vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);

    // "fmagic": add the magic bias so the rounded result lands in the low
    // mantissa bits of the float representation.
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc1x2 += vmagic_bias;
    vfpacc1x3 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc2x2 += vmagic_bias;
    vfpacc2x3 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;
    vfpacc3x2 += vmagic_bias;
    vfpacc3x3 += vmagic_bias;

    // Reinterpret bits and subtract the (bias - zero point) constant to get
    // the final quantized values.
    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
    int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
    int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
    int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
    int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
    int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
    int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column store of the 4x4 tile.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder columns: store 2 then (optionally) 1, shifting the
      // still-unstored values down into the x0 slots between steps.
      if (nc & 2) {
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,975
36.333333
116
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

// QS8 indirect-GEMM (IGEMM) microkernel: a 4-row x 4-column output tile,
// portable scalar code, fp32 requantization using the "imagic" scheme:
// the int32 accumulator is scaled in float, a magic bias is added so the
// rounded integer can be read straight out of the float's bit pattern
// (via float_as_uint32), and output clamping is done on those integers.
//
// a        - array of ks*(mr rows) input pointers; an entry equal to `zero`
//            selects the zero buffer and is NOT offset by a_offset.
// w        - packed weights: 4 int32 biases, then kc*4 int8 weights.
// c        - output; cm_stride between rows, cn_stride between column tiles.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_imagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers. Rows past mr alias the previous row so the kernel
  // can always compute a full 4-row tile; the duplicate stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed all 16 accumulators with the 4 per-output-channel biases.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 row pointers from the indirection buffer; only
      // real (non-zero-buffer) pointers are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // One input element per row, 4 weights per step: 16 int32 MACs.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc1x2 += va1 * vb2;
        vacc1x3 += va1 * vb3;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc2x2 += va2 * vb2;
        vacc2x3 += va2 * vb3;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;
        vacc3x2 += va3 * vb2;
        vacc3x3 += va3 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization, stage 1: int32 accumulators -> float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc1x2 = (float) vacc1x2;
    float vfpacc1x3 = (float) vacc1x3;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc2x2 = (float) vacc2x2;
    float vfpacc2x3 = (float) vacc2x3;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    float vfpacc3x2 = (float) vacc3x2;
    float vfpacc3x3 = (float) vacc3x3;

    // Stage 2: apply the combined requantization scale.
    const float vscale = params->fp32_scalar_imagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc1x2 *= vscale;
    vfpacc1x3 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc2x2 *= vscale;
    vfpacc2x3 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    vfpacc3x2 *= vscale;
    vfpacc3x3 *= vscale;

    // Stage 3: add the magic bias so rounding happens in float addition and
    // the rounded result lands in the low bits of the float representation.
    const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc1x2 += vmagic_bias;
    vfpacc1x3 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc2x2 += vmagic_bias;
    vfpacc2x3 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;
    vfpacc3x2 += vmagic_bias;
    vfpacc3x3 += vmagic_bias;

    // Stage 4: reinterpret the float bits as an int32 (no conversion).
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0);
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1);
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2);
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3);
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0);
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1);
    int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2);
    int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3);
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0);
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1);
    int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2);
    int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3);
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0);
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1);
    int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2);
    int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3);

    // Stage 5: clamp in the integer domain against the biased min/max.
    const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
    vout0x0 = math_max_s32(vout0x0, vmagic_min);
    vout0x1 = math_max_s32(vout0x1, vmagic_min);
    vout0x2 = math_max_s32(vout0x2, vmagic_min);
    vout0x3 = math_max_s32(vout0x3, vmagic_min);
    vout1x0 = math_max_s32(vout1x0, vmagic_min);
    vout1x1 = math_max_s32(vout1x1, vmagic_min);
    vout1x2 = math_max_s32(vout1x2, vmagic_min);
    vout1x3 = math_max_s32(vout1x3, vmagic_min);
    vout2x0 = math_max_s32(vout2x0, vmagic_min);
    vout2x1 = math_max_s32(vout2x1, vmagic_min);
    vout2x2 = math_max_s32(vout2x2, vmagic_min);
    vout2x3 = math_max_s32(vout2x3, vmagic_min);
    vout3x0 = math_max_s32(vout3x0, vmagic_min);
    vout3x1 = math_max_s32(vout3x1, vmagic_min);
    vout3x2 = math_max_s32(vout3x2, vmagic_min);
    vout3x3 = math_max_s32(vout3x3, vmagic_min);

    const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
    vout0x0 = math_min_s32(vout0x0, vmagic_max);
    vout0x1 = math_min_s32(vout0x1, vmagic_max);
    vout0x2 = math_min_s32(vout0x2, vmagic_max);
    vout0x3 = math_min_s32(vout0x3, vmagic_max);
    vout1x0 = math_min_s32(vout1x0, vmagic_max);
    vout1x1 = math_min_s32(vout1x1, vmagic_max);
    vout1x2 = math_min_s32(vout1x2, vmagic_max);
    vout1x3 = math_min_s32(vout1x3, vmagic_max);
    vout2x0 = math_min_s32(vout2x0, vmagic_max);
    vout2x1 = math_min_s32(vout2x1, vmagic_max);
    vout2x2 = math_min_s32(vout2x2, vmagic_max);
    vout2x3 = math_min_s32(vout2x3, vmagic_max);
    vout3x0 = math_min_s32(vout3x0, vmagic_max);
    vout3x1 = math_min_s32(vout3x1, vmagic_max);
    vout3x2 = math_min_s32(vout3x2, vmagic_max);
    vout3x3 = math_min_s32(vout3x3, vmagic_max);

    // Stage 6: remove the magic bias and re-add the output zero point
    // (combined into one constant by the kernel-params setup code).
    const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
    vout0x0 -= vmagic_bias_less_zero_point;
    vout0x1 -= vmagic_bias_less_zero_point;
    vout0x2 -= vmagic_bias_less_zero_point;
    vout0x3 -= vmagic_bias_less_zero_point;
    vout1x0 -= vmagic_bias_less_zero_point;
    vout1x1 -= vmagic_bias_less_zero_point;
    vout1x2 -= vmagic_bias_less_zero_point;
    vout1x3 -= vmagic_bias_less_zero_point;
    vout2x0 -= vmagic_bias_less_zero_point;
    vout2x1 -= vmagic_bias_less_zero_point;
    vout2x2 -= vmagic_bias_less_zero_point;
    vout2x3 -= vmagic_bias_less_zero_point;
    vout3x0 -= vmagic_bias_less_zero_point;
    vout3x1 -= vmagic_bias_less_zero_point;
    vout3x2 -= vmagic_bias_less_zero_point;
    vout3x3 -= vmagic_bias_less_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store rows highest-first (IGEMM convention),
      // then rewind the indirection pointer by ks for the next tile.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder tile (1..3 columns): write 2 columns, shift, then 1.
      if (nc & 2) {
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,338
32.244373
102
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

// QS8 indirect-GEMM (IGEMM) microkernel: 4-row x 4-column tile, portable
// scalar code, fp32 requantization using lrintf(): the int32 accumulator is
// scaled in float, clamped in float against the zero-point-adjusted output
// range, rounded with lrintf, then shifted by the output zero point.
//
// a        - array of ks*(mr rows) input pointers; an entry equal to `zero`
//            selects the zero buffer and is NOT offset by a_offset.
// w        - packed weights: 4 int32 biases, then kc*4 int8 weights.
// c        - output; cm_stride between rows, cn_stride between column tiles.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__scalar_lrintf(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers; rows past mr alias the previous row so a full
  // 4-row tile can be computed unconditionally.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed all 16 accumulators with the 4 per-output-channel biases.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 row pointers; only real (non-zero-buffer)
      // pointers are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // One input element per row, 4 weights per step: 16 int32 MACs.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc1x2 += va1 * vb2;
        vacc1x3 += va1 * vb3;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc2x2 += va2 * vb2;
        vacc2x3 += va2 * vb3;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;
        vacc3x2 += va3 * vb2;
        vacc3x3 += va3 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization, stage 1: int32 accumulators -> float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc1x2 = (float) vacc1x2;
    float vfpacc1x3 = (float) vacc1x3;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc2x2 = (float) vacc2x2;
    float vfpacc2x3 = (float) vacc2x3;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    float vfpacc3x2 = (float) vacc3x2;
    float vfpacc3x3 = (float) vacc3x3;

    // Stage 2: apply the combined requantization scale.
    const float vscale = params->fp32_scalar_lrintf.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc1x2 *= vscale;
    vfpacc1x3 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc2x2 *= vscale;
    vfpacc2x3 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    vfpacc3x2 *= vscale;
    vfpacc3x3 *= vscale;

    // Stage 3: clamp in float against the output range pre-shifted by the
    // output zero point (so the zero point can be added after rounding).
    const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
    vfpacc0x0 = math_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = math_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = math_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = math_max_f32(vfpacc0x3, voutput_min_less_zero_point);
    vfpacc1x0 = math_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = math_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc1x2 = math_max_f32(vfpacc1x2, voutput_min_less_zero_point);
    vfpacc1x3 = math_max_f32(vfpacc1x3, voutput_min_less_zero_point);
    vfpacc2x0 = math_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = math_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc2x2 = math_max_f32(vfpacc2x2, voutput_min_less_zero_point);
    vfpacc2x3 = math_max_f32(vfpacc2x3, voutput_min_less_zero_point);
    vfpacc3x0 = math_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = math_max_f32(vfpacc3x1, voutput_min_less_zero_point);
    vfpacc3x2 = math_max_f32(vfpacc3x2, voutput_min_less_zero_point);
    vfpacc3x3 = math_max_f32(vfpacc3x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
    vfpacc0x0 = math_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = math_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = math_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = math_min_f32(vfpacc0x3, voutput_max_less_zero_point);
    vfpacc1x0 = math_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = math_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc1x2 = math_min_f32(vfpacc1x2, voutput_max_less_zero_point);
    vfpacc1x3 = math_min_f32(vfpacc1x3, voutput_max_less_zero_point);
    vfpacc2x0 = math_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = math_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc2x2 = math_min_f32(vfpacc2x2, voutput_max_less_zero_point);
    vfpacc2x3 = math_min_f32(vfpacc2x3, voutput_max_less_zero_point);
    vfpacc3x0 = math_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = math_min_f32(vfpacc3x1, voutput_max_less_zero_point);
    vfpacc3x2 = math_min_f32(vfpacc3x2, voutput_max_less_zero_point);
    vfpacc3x3 = math_min_f32(vfpacc3x3, voutput_max_less_zero_point);

    // Stage 4: round to the nearest integer (current FP rounding mode).
    const int32_t vrndacc0x0 = (int32_t) lrintf(vfpacc0x0);
    const int32_t vrndacc0x1 = (int32_t) lrintf(vfpacc0x1);
    const int32_t vrndacc0x2 = (int32_t) lrintf(vfpacc0x2);
    const int32_t vrndacc0x3 = (int32_t) lrintf(vfpacc0x3);
    const int32_t vrndacc1x0 = (int32_t) lrintf(vfpacc1x0);
    const int32_t vrndacc1x1 = (int32_t) lrintf(vfpacc1x1);
    const int32_t vrndacc1x2 = (int32_t) lrintf(vfpacc1x2);
    const int32_t vrndacc1x3 = (int32_t) lrintf(vfpacc1x3);
    const int32_t vrndacc2x0 = (int32_t) lrintf(vfpacc2x0);
    const int32_t vrndacc2x1 = (int32_t) lrintf(vfpacc2x1);
    const int32_t vrndacc2x2 = (int32_t) lrintf(vfpacc2x2);
    const int32_t vrndacc2x3 = (int32_t) lrintf(vfpacc2x3);
    const int32_t vrndacc3x0 = (int32_t) lrintf(vfpacc3x0);
    const int32_t vrndacc3x1 = (int32_t) lrintf(vfpacc3x1);
    const int32_t vrndacc3x2 = (int32_t) lrintf(vfpacc3x2);
    const int32_t vrndacc3x3 = (int32_t) lrintf(vfpacc3x3);

    // Stage 5: re-add the output zero point.
    const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
    int32_t vout0x0 = vrndacc0x0 + voutput_zero_point;
    int32_t vout0x1 = vrndacc0x1 + voutput_zero_point;
    int32_t vout0x2 = vrndacc0x2 + voutput_zero_point;
    int32_t vout0x3 = vrndacc0x3 + voutput_zero_point;
    int32_t vout1x0 = vrndacc1x0 + voutput_zero_point;
    int32_t vout1x1 = vrndacc1x1 + voutput_zero_point;
    int32_t vout1x2 = vrndacc1x2 + voutput_zero_point;
    int32_t vout1x3 = vrndacc1x3 + voutput_zero_point;
    int32_t vout2x0 = vrndacc2x0 + voutput_zero_point;
    int32_t vout2x1 = vrndacc2x1 + voutput_zero_point;
    int32_t vout2x2 = vrndacc2x2 + voutput_zero_point;
    int32_t vout2x3 = vrndacc2x3 + voutput_zero_point;
    int32_t vout3x0 = vrndacc3x0 + voutput_zero_point;
    int32_t vout3x1 = vrndacc3x1 + voutput_zero_point;
    int32_t vout3x2 = vrndacc3x2 + voutput_zero_point;
    int32_t vout3x3 = vrndacc3x3 + voutput_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store rows highest-first, then rewind the
      // indirection pointer by ks for the next tile.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder tile (1..3 columns): write 2 columns, shift, then 1.
      if (nc & 2) {
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,700
35.397959
100
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

// QS8 indirect-GEMM (IGEMM) microkernel: 4-row x 4-column tile, scalar code
// targeting WebAssembly (uses Clang's __builtin_wasm_min/max_f32, which map
// to the wasm f32.min/f32.max instructions), fp32 requantization via the
// "fmagic" scheme: scale in float, clamp in float, add a magic bias so the
// rounded integer can be read from the float bit pattern, then subtract the
// combined magic-bias/zero-point constant.
//
// a        - array of ks*(mr rows) input pointers; an entry equal to `zero`
//            selects the zero buffer and is NOT offset by a_offset.
// w        - packed weights: 4 int32 biases, then kc*4 int8 weights.
// c        - output; cm_stride between rows, cn_stride between column tiles.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4__wasm_fmagic(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers; rows past mr alias the previous row so a full
  // 4-row tile can be computed unconditionally.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed all 16 accumulators with the 4 per-output-channel biases.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 row pointers; only real (non-zero-buffer)
      // pointers are shifted by a_offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // One input element per row, 4 weights per step: 16 int32 MACs.
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc1x2 += va1 * vb2;
        vacc1x3 += va1 * vb3;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc2x2 += va2 * vb2;
        vacc2x3 += va2 * vb3;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;
        vacc3x2 += va3 * vb2;
        vacc3x3 += va3 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization, stage 1: int32 accumulators -> float.
    float vfpacc0x0 = (float) vacc0x0;
    float vfpacc0x1 = (float) vacc0x1;
    float vfpacc0x2 = (float) vacc0x2;
    float vfpacc0x3 = (float) vacc0x3;
    float vfpacc1x0 = (float) vacc1x0;
    float vfpacc1x1 = (float) vacc1x1;
    float vfpacc1x2 = (float) vacc1x2;
    float vfpacc1x3 = (float) vacc1x3;
    float vfpacc2x0 = (float) vacc2x0;
    float vfpacc2x1 = (float) vacc2x1;
    float vfpacc2x2 = (float) vacc2x2;
    float vfpacc2x3 = (float) vacc2x3;
    float vfpacc3x0 = (float) vacc3x0;
    float vfpacc3x1 = (float) vacc3x1;
    float vfpacc3x2 = (float) vacc3x2;
    float vfpacc3x3 = (float) vacc3x3;

    // Stage 2: apply the combined requantization scale.
    const float vscale = params->fp32_scalar_fmagic.scale;
    vfpacc0x0 *= vscale;
    vfpacc0x1 *= vscale;
    vfpacc0x2 *= vscale;
    vfpacc0x3 *= vscale;
    vfpacc1x0 *= vscale;
    vfpacc1x1 *= vscale;
    vfpacc1x2 *= vscale;
    vfpacc1x3 *= vscale;
    vfpacc2x0 *= vscale;
    vfpacc2x1 *= vscale;
    vfpacc2x2 *= vscale;
    vfpacc2x3 *= vscale;
    vfpacc3x0 *= vscale;
    vfpacc3x1 *= vscale;
    vfpacc3x2 *= vscale;
    vfpacc3x3 *= vscale;

    // Stage 3: clamp in float against the zero-point-adjusted output range,
    // using wasm f32.min/f32.max builtins.
    const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
    vfpacc0x0 = __builtin_wasm_max_f32(vfpacc0x0, voutput_min_less_zero_point);
    vfpacc0x1 = __builtin_wasm_max_f32(vfpacc0x1, voutput_min_less_zero_point);
    vfpacc0x2 = __builtin_wasm_max_f32(vfpacc0x2, voutput_min_less_zero_point);
    vfpacc0x3 = __builtin_wasm_max_f32(vfpacc0x3, voutput_min_less_zero_point);
    vfpacc1x0 = __builtin_wasm_max_f32(vfpacc1x0, voutput_min_less_zero_point);
    vfpacc1x1 = __builtin_wasm_max_f32(vfpacc1x1, voutput_min_less_zero_point);
    vfpacc1x2 = __builtin_wasm_max_f32(vfpacc1x2, voutput_min_less_zero_point);
    vfpacc1x3 = __builtin_wasm_max_f32(vfpacc1x3, voutput_min_less_zero_point);
    vfpacc2x0 = __builtin_wasm_max_f32(vfpacc2x0, voutput_min_less_zero_point);
    vfpacc2x1 = __builtin_wasm_max_f32(vfpacc2x1, voutput_min_less_zero_point);
    vfpacc2x2 = __builtin_wasm_max_f32(vfpacc2x2, voutput_min_less_zero_point);
    vfpacc2x3 = __builtin_wasm_max_f32(vfpacc2x3, voutput_min_less_zero_point);
    vfpacc3x0 = __builtin_wasm_max_f32(vfpacc3x0, voutput_min_less_zero_point);
    vfpacc3x1 = __builtin_wasm_max_f32(vfpacc3x1, voutput_min_less_zero_point);
    vfpacc3x2 = __builtin_wasm_max_f32(vfpacc3x2, voutput_min_less_zero_point);
    vfpacc3x3 = __builtin_wasm_max_f32(vfpacc3x3, voutput_min_less_zero_point);

    const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
    vfpacc0x0 = __builtin_wasm_min_f32(vfpacc0x0, voutput_max_less_zero_point);
    vfpacc0x1 = __builtin_wasm_min_f32(vfpacc0x1, voutput_max_less_zero_point);
    vfpacc0x2 = __builtin_wasm_min_f32(vfpacc0x2, voutput_max_less_zero_point);
    vfpacc0x3 = __builtin_wasm_min_f32(vfpacc0x3, voutput_max_less_zero_point);
    vfpacc1x0 = __builtin_wasm_min_f32(vfpacc1x0, voutput_max_less_zero_point);
    vfpacc1x1 = __builtin_wasm_min_f32(vfpacc1x1, voutput_max_less_zero_point);
    vfpacc1x2 = __builtin_wasm_min_f32(vfpacc1x2, voutput_max_less_zero_point);
    vfpacc1x3 = __builtin_wasm_min_f32(vfpacc1x3, voutput_max_less_zero_point);
    vfpacc2x0 = __builtin_wasm_min_f32(vfpacc2x0, voutput_max_less_zero_point);
    vfpacc2x1 = __builtin_wasm_min_f32(vfpacc2x1, voutput_max_less_zero_point);
    vfpacc2x2 = __builtin_wasm_min_f32(vfpacc2x2, voutput_max_less_zero_point);
    vfpacc2x3 = __builtin_wasm_min_f32(vfpacc2x3, voutput_max_less_zero_point);
    vfpacc3x0 = __builtin_wasm_min_f32(vfpacc3x0, voutput_max_less_zero_point);
    vfpacc3x1 = __builtin_wasm_min_f32(vfpacc3x1, voutput_max_less_zero_point);
    vfpacc3x2 = __builtin_wasm_min_f32(vfpacc3x2, voutput_max_less_zero_point);
    vfpacc3x3 = __builtin_wasm_min_f32(vfpacc3x3, voutput_max_less_zero_point);

    // Stage 4: add the magic bias so the float addition performs the
    // rounding and the result is readable from the float's bit pattern.
    const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
    vfpacc0x0 += vmagic_bias;
    vfpacc0x1 += vmagic_bias;
    vfpacc0x2 += vmagic_bias;
    vfpacc0x3 += vmagic_bias;
    vfpacc1x0 += vmagic_bias;
    vfpacc1x1 += vmagic_bias;
    vfpacc1x2 += vmagic_bias;
    vfpacc1x3 += vmagic_bias;
    vfpacc2x0 += vmagic_bias;
    vfpacc2x1 += vmagic_bias;
    vfpacc2x2 += vmagic_bias;
    vfpacc2x3 += vmagic_bias;
    vfpacc3x0 += vmagic_bias;
    vfpacc3x1 += vmagic_bias;
    vfpacc3x2 += vmagic_bias;
    vfpacc3x3 += vmagic_bias;

    // Stage 5: bit-cast and subtract the combined magic-bias/zero-point
    // constant to obtain the final quantized value.
    const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
    int32_t vout0x0 = (int32_t) float_as_uint32(vfpacc0x0) - vmagic_bias_less_output_zero_point;
    int32_t vout0x1 = (int32_t) float_as_uint32(vfpacc0x1) - vmagic_bias_less_output_zero_point;
    int32_t vout0x2 = (int32_t) float_as_uint32(vfpacc0x2) - vmagic_bias_less_output_zero_point;
    int32_t vout0x3 = (int32_t) float_as_uint32(vfpacc0x3) - vmagic_bias_less_output_zero_point;
    int32_t vout1x0 = (int32_t) float_as_uint32(vfpacc1x0) - vmagic_bias_less_output_zero_point;
    int32_t vout1x1 = (int32_t) float_as_uint32(vfpacc1x1) - vmagic_bias_less_output_zero_point;
    int32_t vout1x2 = (int32_t) float_as_uint32(vfpacc1x2) - vmagic_bias_less_output_zero_point;
    int32_t vout1x3 = (int32_t) float_as_uint32(vfpacc1x3) - vmagic_bias_less_output_zero_point;
    int32_t vout2x0 = (int32_t) float_as_uint32(vfpacc2x0) - vmagic_bias_less_output_zero_point;
    int32_t vout2x1 = (int32_t) float_as_uint32(vfpacc2x1) - vmagic_bias_less_output_zero_point;
    int32_t vout2x2 = (int32_t) float_as_uint32(vfpacc2x2) - vmagic_bias_less_output_zero_point;
    int32_t vout2x3 = (int32_t) float_as_uint32(vfpacc2x3) - vmagic_bias_less_output_zero_point;
    int32_t vout3x0 = (int32_t) float_as_uint32(vfpacc3x0) - vmagic_bias_less_output_zero_point;
    int32_t vout3x1 = (int32_t) float_as_uint32(vfpacc3x1) - vmagic_bias_less_output_zero_point;
    int32_t vout3x2 = (int32_t) float_as_uint32(vfpacc3x2) - vmagic_bias_less_output_zero_point;
    int32_t vout3x3 = (int32_t) float_as_uint32(vfpacc3x3) - vmagic_bias_less_output_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store rows highest-first, then rewind the
      // indirection pointer by ks for the next tile.
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);
      nc -= 4;
    } else {
      // Remainder tile (1..3 columns): write 2 columns, shift, then 1.
      if (nc & 2) {
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,293
37.414966
116
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4-minmax-rndnu-scalar.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/gemm.h>

// QS8 IGEMM (indirect GEMM) micro-kernel: portable scalar implementation that
// computes a 4x4 output tile of a signed 8-bit matrix multiplication, with
// "rndnu" (round-to-nearest, rounding constant added before an arithmetic
// shift) fixed-point requantization and min/max output clamping.
//
//   mr        - number of valid output rows (1..4).
//   nc        - number of output columns remaining; processed 4 at a time.
//   kc        - depth (number of int8 elements accumulated per row pointer).
//   ks        - total size, in bytes, of the indirection pointer groups
//               consumed per output tile (a multiple of 4 * sizeof(void*)).
//   a         - indirection buffer of input row pointers, 4 per group.
//   w         - packed weights: 4 int32 biases, then int8 weights.
//   c         - output base; cm_stride bytes between rows, cn_stride between
//               column tiles.
//   a_offset  - byte offset applied to every row pointer except `zero`.
//   zero      - shared dummy row used for out-of-image taps; never offset.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x4__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers. Rows beyond mr alias the previous row, so the
  // unconditional stores below are safe (they just rewrite the same bytes).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 16 accumulators from the 4 packed int32 biases; each
    // output row starts from the same per-column bias.
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc0x2 = ((const int32_t*) w)[2];
    int32_t vacc0x3 = ((const int32_t*) w)[3];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    int32_t vacc1x2 = vacc0x2;
    int32_t vacc1x3 = vacc0x3;
    int32_t vacc2x0 = vacc0x0;
    int32_t vacc2x1 = vacc0x1;
    int32_t vacc2x2 = vacc0x2;
    int32_t vacc2x3 = vacc0x3;
    int32_t vacc3x0 = vacc0x0;
    int32_t vacc3x1 = vacc0x1;
    int32_t vacc3x2 = vacc0x2;
    int32_t vacc3x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next group of 4 input row pointers from the indirection
      // buffer. The shared `zero` row is deliberately not offset.
      const int8_t* restrict a0 = a[0];
      assert(a0 != NULL);
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      assert(a1 != NULL);
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      assert(a2 != NULL);
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      assert(a3 != NULL);
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // Main multiply-accumulate loop: one int8 element per row, times the
      // 4 packed weights for this depth position, per iteration.
      size_t k = kc;
      do {
        const int32_t va0 = (int32_t) *a0++;
        const int32_t va1 = (int32_t) *a1++;
        const int32_t va2 = (int32_t) *a2++;
        const int32_t va3 = (int32_t) *a3++;

        const int32_t vb0 = (int32_t) ((const int8_t*) w)[0];
        const int32_t vb1 = (int32_t) ((const int8_t*) w)[1];
        const int32_t vb2 = (int32_t) ((const int8_t*) w)[2];
        const int32_t vb3 = (int32_t) ((const int8_t*) w)[3];
        w = (const void*) ((const int8_t*) w + 4);

        vacc0x0 += va0 * vb0;
        vacc0x1 += va0 * vb1;
        vacc0x2 += va0 * vb2;
        vacc0x3 += va0 * vb3;
        vacc1x0 += va1 * vb0;
        vacc1x1 += va1 * vb1;
        vacc1x2 += va1 * vb2;
        vacc1x3 += va1 * vb3;
        vacc2x0 += va2 * vb0;
        vacc2x1 += va2 * vb1;
        vacc2x2 += va2 * vb2;
        vacc2x3 += va2 * vb3;
        vacc3x0 += va3 * vb0;
        vacc3x1 += va3 * vb1;
        vacc3x2 += va3 * vb2;
        vacc3x3 += va3 * vb3;

        k -= sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: widen each accumulator to 64 bits via an
    // extending multiply by the fixed-point multiplier, add the rounding
    // constant, then arithmetic-shift right.
    const int32_t vmultiplier = params->rndnu_scalar.multiplier;
    const int64_t vrounding = params->rndnu_scalar.rounding;
    const int64_t vextacc0x0 = math_mulext_s32(vacc0x0, vmultiplier) + vrounding;
    const int64_t vextacc0x1 = math_mulext_s32(vacc0x1, vmultiplier) + vrounding;
    const int64_t vextacc0x2 = math_mulext_s32(vacc0x2, vmultiplier) + vrounding;
    const int64_t vextacc0x3 = math_mulext_s32(vacc0x3, vmultiplier) + vrounding;
    const int64_t vextacc1x0 = math_mulext_s32(vacc1x0, vmultiplier) + vrounding;
    const int64_t vextacc1x1 = math_mulext_s32(vacc1x1, vmultiplier) + vrounding;
    const int64_t vextacc1x2 = math_mulext_s32(vacc1x2, vmultiplier) + vrounding;
    const int64_t vextacc1x3 = math_mulext_s32(vacc1x3, vmultiplier) + vrounding;
    const int64_t vextacc2x0 = math_mulext_s32(vacc2x0, vmultiplier) + vrounding;
    const int64_t vextacc2x1 = math_mulext_s32(vacc2x1, vmultiplier) + vrounding;
    const int64_t vextacc2x2 = math_mulext_s32(vacc2x2, vmultiplier) + vrounding;
    const int64_t vextacc2x3 = math_mulext_s32(vacc2x3, vmultiplier) + vrounding;
    const int64_t vextacc3x0 = math_mulext_s32(vacc3x0, vmultiplier) + vrounding;
    const int64_t vextacc3x1 = math_mulext_s32(vacc3x1, vmultiplier) + vrounding;
    const int64_t vextacc3x2 = math_mulext_s32(vacc3x2, vmultiplier) + vrounding;
    const int64_t vextacc3x3 = math_mulext_s32(vacc3x3, vmultiplier) + vrounding;

    const uint32_t vshift = params->rndnu_scalar.shift;
    int32_t vout0x0 = (int32_t) math_asr_s64(vextacc0x0, vshift);
    int32_t vout0x1 = (int32_t) math_asr_s64(vextacc0x1, vshift);
    int32_t vout0x2 = (int32_t) math_asr_s64(vextacc0x2, vshift);
    int32_t vout0x3 = (int32_t) math_asr_s64(vextacc0x3, vshift);
    int32_t vout1x0 = (int32_t) math_asr_s64(vextacc1x0, vshift);
    int32_t vout1x1 = (int32_t) math_asr_s64(vextacc1x1, vshift);
    int32_t vout1x2 = (int32_t) math_asr_s64(vextacc1x2, vshift);
    int32_t vout1x3 = (int32_t) math_asr_s64(vextacc1x3, vshift);
    int32_t vout2x0 = (int32_t) math_asr_s64(vextacc2x0, vshift);
    int32_t vout2x1 = (int32_t) math_asr_s64(vextacc2x1, vshift);
    int32_t vout2x2 = (int32_t) math_asr_s64(vextacc2x2, vshift);
    int32_t vout2x3 = (int32_t) math_asr_s64(vextacc2x3, vshift);
    int32_t vout3x0 = (int32_t) math_asr_s64(vextacc3x0, vshift);
    int32_t vout3x1 = (int32_t) math_asr_s64(vextacc3x1, vshift);
    int32_t vout3x2 = (int32_t) math_asr_s64(vextacc3x2, vshift);
    int32_t vout3x3 = (int32_t) math_asr_s64(vextacc3x3, vshift);

    // Clamp to the quantized output range; both bounds are pre-biased by the
    // output zero point, which is added back afterwards.
    const int32_t voutput_min_less_zero_point = params->rndnu_scalar.output_min_less_zero_point;
    vout0x0 = math_max_s32(vout0x0, voutput_min_less_zero_point);
    vout0x1 = math_max_s32(vout0x1, voutput_min_less_zero_point);
    vout0x2 = math_max_s32(vout0x2, voutput_min_less_zero_point);
    vout0x3 = math_max_s32(vout0x3, voutput_min_less_zero_point);
    vout1x0 = math_max_s32(vout1x0, voutput_min_less_zero_point);
    vout1x1 = math_max_s32(vout1x1, voutput_min_less_zero_point);
    vout1x2 = math_max_s32(vout1x2, voutput_min_less_zero_point);
    vout1x3 = math_max_s32(vout1x3, voutput_min_less_zero_point);
    vout2x0 = math_max_s32(vout2x0, voutput_min_less_zero_point);
    vout2x1 = math_max_s32(vout2x1, voutput_min_less_zero_point);
    vout2x2 = math_max_s32(vout2x2, voutput_min_less_zero_point);
    vout2x3 = math_max_s32(vout2x3, voutput_min_less_zero_point);
    vout3x0 = math_max_s32(vout3x0, voutput_min_less_zero_point);
    vout3x1 = math_max_s32(vout3x1, voutput_min_less_zero_point);
    vout3x2 = math_max_s32(vout3x2, voutput_min_less_zero_point);
    vout3x3 = math_max_s32(vout3x3, voutput_min_less_zero_point);

    const int32_t voutput_max_less_zero_point = params->rndnu_scalar.output_max_less_zero_point;
    vout0x0 = math_min_s32(vout0x0, voutput_max_less_zero_point);
    vout0x1 = math_min_s32(vout0x1, voutput_max_less_zero_point);
    vout0x2 = math_min_s32(vout0x2, voutput_max_less_zero_point);
    vout0x3 = math_min_s32(vout0x3, voutput_max_less_zero_point);
    vout1x0 = math_min_s32(vout1x0, voutput_max_less_zero_point);
    vout1x1 = math_min_s32(vout1x1, voutput_max_less_zero_point);
    vout1x2 = math_min_s32(vout1x2, voutput_max_less_zero_point);
    vout1x3 = math_min_s32(vout1x3, voutput_max_less_zero_point);
    vout2x0 = math_min_s32(vout2x0, voutput_max_less_zero_point);
    vout2x1 = math_min_s32(vout2x1, voutput_max_less_zero_point);
    vout2x2 = math_min_s32(vout2x2, voutput_max_less_zero_point);
    vout2x3 = math_min_s32(vout2x3, voutput_max_less_zero_point);
    vout3x0 = math_min_s32(vout3x0, voutput_max_less_zero_point);
    vout3x1 = math_min_s32(vout3x1, voutput_max_less_zero_point);
    vout3x2 = math_min_s32(vout3x2, voutput_max_less_zero_point);
    vout3x3 = math_min_s32(vout3x3, voutput_max_less_zero_point);

    // Re-center the clamped results on the output zero point.
    const int32_t voutput_zero_point = params->rndnu_scalar.output_zero_point;
    vout0x0 += voutput_zero_point;
    vout0x1 += voutput_zero_point;
    vout0x2 += voutput_zero_point;
    vout0x3 += voutput_zero_point;
    vout1x0 += voutput_zero_point;
    vout1x1 += voutput_zero_point;
    vout1x2 += voutput_zero_point;
    vout1x3 += voutput_zero_point;
    vout2x0 += voutput_zero_point;
    vout2x1 += voutput_zero_point;
    vout2x2 += voutput_zero_point;
    vout2x3 += voutput_zero_point;
    vout3x0 += voutput_zero_point;
    vout3x1 += voutput_zero_point;
    vout3x2 += voutput_zero_point;
    vout3x3 += voutput_zero_point;

    if XNN_LIKELY(nc >= 4) {
      // Full 4-column tile: store all rows (highest row first).
      c3[0] = (int8_t) vout3x0;
      c3[1] = (int8_t) vout3x1;
      c3[2] = (int8_t) vout3x2;
      c3[3] = (int8_t) vout3x3;
      c2[0] = (int8_t) vout2x0;
      c2[1] = (int8_t) vout2x1;
      c2[2] = (int8_t) vout2x2;
      c2[3] = (int8_t) vout2x3;
      c1[0] = (int8_t) vout1x0;
      c1[1] = (int8_t) vout1x1;
      c1[2] = (int8_t) vout1x2;
      c1[3] = (int8_t) vout1x3;
      c0[0] = (int8_t) vout0x0;
      c0[1] = (int8_t) vout0x1;
      c0[2] = (int8_t) vout0x2;
      c0[3] = (int8_t) vout0x3;

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tile: store 2 columns, then 1, shifting remaining results
      // into the x0 slots between the steps.
      if (nc & 2) {
        c3[0] = (int8_t) vout3x0;
        c3[1] = (int8_t) vout3x1;
        vout3x0 = vout3x2;
        c3 += 2;
        c2[0] = (int8_t) vout2x0;
        c2[1] = (int8_t) vout2x1;
        vout2x0 = vout2x2;
        c2 += 2;
        c1[0] = (int8_t) vout1x0;
        c1[1] = (int8_t) vout1x1;
        vout1x0 = vout1x2;
        c1 += 2;
        c0[0] = (int8_t) vout0x0;
        c0[1] = (int8_t) vout0x1;
        vout0x0 = vout0x2;
        c0 += 2;
      }
      if (nc & 1) {
        c3[0] = (int8_t) vout3x0;
        c2[0] = (int8_t) vout2x0;
        c1[0] = (int8_t) vout1x0;
        c0[0] = (int8_t) vout0x0;
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,720
37.564748
96
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 IGEMM micro-kernel, 4x4 tile, AVX (SSE4.1 intrinsics) with 128-bit
// ("ld128") weight loads and fp32 requantization. Weights are packed in a
// "c2" layout: kc is rounded up to a multiple of 2, and each 16-bit lane pair
// feeds _mm_madd_epi16 via a broadcast of 2 input elements at a time.
// XNN_OOB_READS: the kernel may read (but never write) past buffer ends.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 2 * sizeof(int8_t));

  // Output row pointers; out-of-range rows alias the previous row so the
  // stores below are harmless duplicates for mr < 4.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // One 4-wide int32 accumulator vector per row, seeded from the biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next group of 4 input row pointers; the shared `zero` row is never
      // offset by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 input bytes per row per iteration; weights come in two
      // 128-bit loads (16+16 int8), sign-extended to int16 lane groups.
      while (k >= 8 * sizeof(int8_t)) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // Low half sign-extended with cvtepi8_epi16; high half via
        // unpackhi+arithmetic shift (equivalent sign extension).
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: k is 2, 4 or 6 bytes (kc was rounded to a multiple of 2);
      // may over-read input (XNN_OOB_READS) since pointers advance by k only.
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const int8_t*) w + 8);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const int8_t*) w + 8);
          const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const int8_t*) w + 8);
            const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

            vacc0x0123 = _mm_add_epi32(vacc0x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc1x0123 = _mm_add_epi32(vacc1x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc2x0123 = _mm_add_epi32(vacc2x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc3x0123 = _mm_add_epi32(vacc3x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: convert to float, scale, clamp the upper bound in
    // float (the lower bound is applied after packing, in int8), convert back
    // with round-to-nearest-even.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Pack int32 -> int16 (saturating), add the zero point, then pack to
    // int8 and apply the output minimum.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tile: store 2 bytes per row, shift, then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,361
40.467153
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 IGEMM micro-kernel, 4x4 tile, AVX (SSE4.1 intrinsics) with 64-bit
// ("ld64") weight loads and fp32 requantization. Identical to the ld128
// variant except that the four weight groups are fetched with four 8-byte
// loads instead of two 16-byte loads.
// XNN_OOB_READS: the kernel may read (but never write) past buffer ends.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__avx_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 2 * sizeof(int8_t));

  // Output row pointers; out-of-range rows alias the previous row so the
  // stores below are harmless duplicates for mr < 4.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // One 4-wide int32 accumulator vector per row, seeded from the biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next group of 4 input row pointers; the shared `zero` row is never
      // offset by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 input bytes per row per iteration; weights come in four
      // 8-byte loads, each sign-extended to 8 int16 lanes.
      while (k >= 8 * sizeof(int8_t)) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: k is 2, 4 or 6 bytes (kc was rounded to a multiple of 2);
      // may over-read input (XNN_OOB_READS) since pointers advance by k only.
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const int8_t*) w + 8);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123,
          _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const int8_t*) w + 8);
          const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

          vacc0x0123 = _mm_add_epi32(vacc0x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc1x0123 = _mm_add_epi32(vacc1x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc2x0123 = _mm_add_epi32(vacc2x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc3x0123 = _mm_add_epi32(vacc3x0123,
            _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const int8_t*) w + 8);
            const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

            vacc0x0123 = _mm_add_epi32(vacc0x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc1x0123 = _mm_add_epi32(vacc1x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc2x0123 = _mm_add_epi32(vacc2x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc3x0123 = _mm_add_epi32(vacc3x0123,
              _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: convert to float, scale, clamp the upper bound in
    // float (the lower bound is applied after packing, in int8), convert back
    // with round-to-nearest-even.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Pack int32 -> int16 (saturating), add the zero point, then pack to
    // int8 and apply the output minimum.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full tile: each 32-bit lane of vout holds one row's 4 int8 outputs.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tile: store 2 bytes per row, shift, then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,479
40.594203
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__sse2_ld128( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) 
a3 + a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01); const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01); const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23); const __m128i vxb2 = _mm_unpacklo_epi8(vb23, vsb23); const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23); 
vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, 
_mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, 
vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3)))); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)))); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)))); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, 
(uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi16(vout, 6); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c0 = (int8_t) _mm_cvtsi128_si32(vout); } nc = 0; } } while (nc != 0); }
11,992
42.140288
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__sse2_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 
+ a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, 
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, 
_mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = 
_mm_load_ps(params->fp32_sse2.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3)))); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)))); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)))); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc 
-= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi16(vout, 6); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c0 = (int8_t) _mm_cvtsi128_si32(vout); } nc = 0; } } while (nc != 0); }
12,082
42.464029
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2-sse.c.in // Generator: tools/xngen // // Copyright 2020 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__sse41_ld128( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) 
a3 + a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 
2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } if (k != 0) { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); const __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); const __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); const __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); const __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0)); if (k > 2 * sizeof(int8_t)) { const __m128i vb1 = 
_mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1)); if (k > 4 * sizeof(int8_t)) { const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w); w = (const void*) ((const int8_t*) w + 8); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2)); } } } p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = 
_mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi8(vout, 12); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c0 = (int8_t) _mm_extract_epi8(vout, 0); } nc = 0; } } while (nc != 0); }
11,363
40.474453
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) microkernel: MR=4 rows x NR=4 columns, with K
// consumed in groups of 2 int8 values per column ("4c2" packing), SSE4.1
// variant using 64-bit (ld64) weight loads. Accumulates in int32, requantizes
// with fp32 arithmetic (params->fp32_sse4), then clamps and stores int8.
//
// `a` is the indirection buffer: ks/(4*sizeof(void*)) groups of 4 row
// pointers. Pointers equal to `zero` select the zero buffer and are not
// offset by `a_offset`. `w` holds packed bias (4 x int32) followed by packed
// int8 weights per output-channel group.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round K up to the c2 packing granularity (2 bytes).
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // fewer rows than MR: alias the extra row pointers downward
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all four row accumulators from the packed bias.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Load the next group of 4 row pointers from the indirection buffer;
      // the `zero` sentinel must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 int8s of K per iteration (4 groups of 2), weights loaded
      // 8 bytes at a time and sign-extended to int16.
      while (k >= 8 * sizeof(int8_t)) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // K group 0: broadcast each row's first int16 pair, multiply-add.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        // K group 1.
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
        // K group 2.
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
        // K group 3.
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4 or 6 int8s of K left (kc was rounded to 2).
      // XNN_OOB_READS permits the over-reads of the 8-byte loads here.
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const int8_t*) w + 8);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const int8_t*) w + 8);
          const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

          vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));
          vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const int8_t*) w + 8);
            const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

            vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
            vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(_mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2));
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantize: int32 -> fp32, scale, clamp above, convert back with
    // round-to-nearest-even (_mm_cvtps_epi32).
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Add the output zero point with saturation, pack to int8, clamp below.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store; rows are stored highest-index first.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,481
40.601449
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel: MR=4 x NR=4, K in groups of 2 int8s
// per column ("4c2"), WAsm SIMD with i32x4.dot_i16x8_s multiply-accumulate
// and 128-bit (ld128) weight loads. int32 accumulation; fp32 requantization
// via the magic-bias trick (params->fp32_wasmsimd); int8 min/max clamping.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round K up to the c2 packing granularity (2 bytes).
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // fewer rows than MR: alias the extra row pointers downward
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all four row accumulators from the packed bias.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Load the next group of 4 row pointers from the indirection buffer;
      // the `zero` sentinel must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 int8s of K per iteration; weights loaded 16 bytes at a
      // time and widened to two int16x8 halves.
      while (k >= 8 * sizeof(int8_t)) {
        const v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        const v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        const v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;
        const v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 += 8;

        const v128_t vb01 = wasm_v128_load(w);
        const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
        const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);

        // K group 0: broadcast each row's first int16 pair, dot-accumulate.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
        // K group 1.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
        const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
        const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
        const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);

        // K group 2.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        // K group 3.
        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4 or 6 int8s of K left (kc was rounded to 2).
      // XNN_OOB_READS permits the over-reads of the 8-byte loads here.
      if (k != 0) {
        const v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const v128_t vxb0 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const v128_t vxb1 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const v128_t vxb2 = wasm_i16x8_load8x8(w);
            w = (const void*) ((const int8_t*) w + 8);

            vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
            vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
            vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
            vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantize: int32 -> fp32, scale, then magic-bias rounding: adding
    // magic_bias places the value's integer part in the low mantissa bits;
    // the max/sub pair clamps below and removes the bias + zero point.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

    // Narrow to int8 with saturation and clamp above.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store; rows are stored highest-index first.
      wasm_v128_store32_lane(c3, vout, 3);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;

        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c3, vout, 12);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,708
38.662963
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM (IGEMM) microkernel: MR=4 x NR=4, K in groups of 2 int8s
// per column ("4c2"), WAsm SIMD with i32x4.dot_i16x8_s multiply-accumulate
// and 64-bit (ld64) weight loads (each weight group loaded and widened
// separately). int32 accumulation; fp32 requantization via the magic-bias
// trick (params->fp32_wasmsimd); int8 min/max clamping.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round K up to the c2 packing granularity (2 bytes).
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // fewer rows than MR: alias the extra row pointers downward
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all four row accumulators from the packed bias.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Load the next group of 4 row pointers from the indirection buffer;
      // the `zero` sentinel must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 int8s of K per iteration; each 8-byte weight group is
      // loaded and sign-extended to int16 individually.
      while (k >= 8 * sizeof(int8_t)) {
        const v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        const v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        const v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;
        const v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 += 8;

        // K group 0: broadcast each row's first int16 pair, dot-accumulate.
        const v128_t vxb0 = wasm_i16x8_load8x8(w);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
        // K group 1.
        const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
        // K group 2.
        const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        // K group 3.
        const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4 or 6 int8s of K left (kc was rounded to 2).
      // XNN_OOB_READS permits the over-reads of the 8-byte loads here.
      if (k != 0) {
        const v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const v128_t vxb0 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

        if (k > 2 * sizeof(int8_t)) {
          const v128_t vxb1 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));

          if (k > 4 * sizeof(int8_t)) {
            const v128_t vxb2 = wasm_i16x8_load8x8(w);
            w = (const void*) ((const int8_t*) w + 8);

            vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
            vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
            vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
            vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantize: int32 -> fp32, scale, then magic-bias rounding: adding
    // magic_bias places the value's integer part in the low mantissa bits;
    // the max/sub pair clamps below and removes the bias + zero point.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

    // Narrow to int8 with saturation and clamp above.
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store; rows are stored highest-index first.
      wasm_v128_store32_lane(c3, vout, 3);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store 2 and/or 1 remaining column(s) per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;

        vout = wasm_u32x4_shr(vout, 16);
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c3, vout, 12);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
10,604
38.570896
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=4 rows x NR=4 columns, with K
// processed 2 int8 elements per 16-bit lane ("c2"), using the AMD XOP
// fused multiply-add intrinsic _mm_maddd_epi16 and 128-bit weight loads
// ("ld128"). Accumulation is int32; requantization is fp32 ("minmax-fp32").
//
//   mr, nc, kc  - tile rows, output columns remaining, K size in bytes
//   ks          - indirection buffer size in bytes (multiple of 4 pointers)
//   a           - indirection buffer of input-row pointers
//   w           - packed weights: 4 int32 biases, then int8 weight bytes
//   c           - output tile base, rows separated by cm_stride
//   a_offset    - byte offset applied to every input pointer except `zero`
//   zero        - points at a zero buffer used for out-of-image rows
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in pairs of int8 elements, so round it up to 2 bytes.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Set up one output pointer per row; rows beyond `mr` alias the previous
  // row so stores for them are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 row accumulators from the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 input-row pointers from the indirection buffer.
      // a_offset is only applied to real rows, never to the `zero` buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 input bytes (4 K-pairs) per row per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        // Load and sign-extend 8 int8 inputs per row to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // ld128: one 16-byte load yields two 8-byte weight groups; the low
        // half is sign-extended with PMOVSXBW, the high half with an
        // unpack+arithmetic-shift (equivalent sign extension).
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        // _mm_maddd_epi16 (XOP) fuses multiply-add-pairs with the int32
        // accumulator add. Each K-pair of the input is broadcast with a
        // 32-bit shuffle before being multiplied against the weight group.
        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);

        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4, or 6 leftover bytes of K (kc was rounded to 2).
      // XNN_OOB_READS permits the 8-byte loads to read past the valid tail.
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const int8_t*) w + 8);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const int8_t*) w + 8);
          const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
          vacc3x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const int8_t*) w + 8);
            const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

            vacc0x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
            vacc1x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
            vacc2x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
            vacc3x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp the upper bound before the
    // float->int conversion, then add the output zero point with saturation.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    // Narrow to int8 (saturating) and apply the lower clamp; the upper clamp
    // was already applied in the float domain above.
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store: one 32-bit lane per output row.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail store for 1-3 remaining columns: 2 bytes, then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,050
38.751799
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=4 rows x NR=4 columns, K
// processed 2 int8 elements per 16-bit lane ("c2"), XOP ISA. The "ld64"
// variant loads packed weights 8 bytes at a time (vs. 16 bytes in ld128);
// the arithmetic is otherwise identical to the ld128 kernel.
//
//   mr, nc, kc  - tile rows, output columns remaining, K size in bytes
//   ks          - indirection buffer size in bytes (multiple of 4 pointers)
//   a           - indirection buffer of input-row pointers
//   w           - packed weights: 4 int32 biases, then int8 weight bytes
//   c           - output tile base, rows separated by cm_stride
//   a_offset    - byte offset applied to every input pointer except `zero`
//   zero        - points at a zero buffer used for out-of-image rows
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is consumed in pairs of int8 elements, so round it up to 2 bytes.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Per-row output pointers; rows beyond `mr` alias the previous row.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 row accumulators from the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 input-row pointers from the indirection buffer;
      // a_offset is only applied to real rows, never to the `zero` buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 input bytes (4 K-pairs) per row per iteration.
      while (k >= 8 * sizeof(int8_t)) {
        // Load and sign-extend 8 int8 inputs per row to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // ld64: each 8-byte weight group is loaded and sign-extended
        // separately. _mm_maddd_epi16 (XOP) fuses multiply-add-pairs with
        // the int32 accumulator add; the matching input K-pair is broadcast
        // with a 32-bit shuffle.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);

        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);

        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);

        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(3, 3, 3, 3)), vxb3, vacc3x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4, or 6 leftover bytes of K (kc was rounded to 2).
      // XNN_OOB_READS permits the 8-byte loads to read past the valid tail.
      if (k != 0) {
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        const __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        w = (const void*) ((const int8_t*) w + 8);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(
          _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 0, 0, 0)), vxb0, vacc3x0123);

        if (k > 2 * sizeof(int8_t)) {
          const __m128i vb1 = _mm_loadl_epi64((const __m128i*) w);
          w = (const void*) ((const int8_t*) w + 8);
          const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

          vacc0x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc0x0123);
          vacc1x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc1x0123);
          vacc2x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc2x0123);
          vacc3x0123 = _mm_maddd_epi16(
            _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(1, 1, 1, 1)), vxb1, vacc3x0123);

          if (k > 4 * sizeof(int8_t)) {
            const __m128i vb2 = _mm_loadl_epi64((const __m128i*) w);
            w = (const void*) ((const int8_t*) w + 8);
            const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

            vacc0x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc0x0123);
            vacc1x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc1x0123);
            vacc2x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc2x0123);
            vacc3x0123 = _mm_maddd_epi16(
              _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(2, 2, 2, 2)), vxb2, vacc3x0123);
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp the upper bound before the
    // float->int conversion, then add the output zero point with saturation.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    // Narrow to int8 (saturating) and apply the lower clamp; the upper clamp
    // was already applied in the float domain above.
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store: one 32-bit lane per output row.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail store for 1-3 remaining columns: 2 bytes, then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,168
38.889286
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-avx-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=4 rows x NR=4 columns, with the
// "c2s4" scheme: 2 int8 K-elements per 16-bit lane, and the input vector is
// rotated by one 32-bit lane (shuffled) between the 4 weight groups instead
// of being broadcast per group. AVX/SSE4.1 ISA, 128-bit weight loads
// ("ld128"). Accumulation is int32; requantization is fp32 ("minmax-fp32").
//
//   mr, nc, kc  - tile rows, output columns remaining, K size in bytes
//   ks          - indirection buffer size in bytes (multiple of 4 pointers)
//   a           - indirection buffer of input-row pointers
//   w           - packed weights: 4 int32 biases, then int8 weight bytes
//   c           - output tile base, rows separated by cm_stride
//   a_offset    - byte offset applied to every input pointer except `zero`
//   zero        - points at a zero buffer used for out-of-image rows
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__avx_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // c2s4 consumes a full 8-byte input vector per iteration, so K is rounded
  // up to 8 bytes (unlike the plain c2 kernels, which round to 2).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Per-row output pointers; rows beyond `mr` alias the previous row.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 row accumulators from the packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next 4 input-row pointers from the indirection buffer;
      // a_offset is only applied to real rows, never to the `zero` buffer.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // kc is a multiple of 8 here, so a do/while with no remainder path.
      do {
        // Load and sign-extend 8 int8 inputs per row to int16. The vxa*
        // vectors are non-const: they are rotated in place below.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // ld128: one 16-byte load yields two 8-byte weight groups; the low
        // half is sign-extended with PMOVSXBW, the high half with an
        // unpack+arithmetic-shift (equivalent sign extension).
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        // "s4" rotation: after each madd, rotate the input right by one
        // 32-bit lane so the next weight group pairs with the next K-pair.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        // Last group: no rotation needed after the final madd.
        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp the upper bound before the
    // float->int conversion, then add the output zero point with saturation.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    // Narrow to int8 (saturating) and apply the lower clamp; the upper clamp
    // was already applied in the float domain above.
    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store: one 32-bit lane per output row.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column group.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail store for 1-3 remaining columns: 2 bytes, then 1 byte per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,297
37.957746
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-avx-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__avx_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) 
a3 + a_offset); } a += 4; size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb0); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_cvtepi8_epi16(vb1); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb2); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, 
_mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_cvtepi8_epi16(vb3); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) 
params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi8(vout, 12); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c0 = (int8_t) _mm_extract_epi8(vout, 0); } nc = 0; } } while (nc != 0); }
8,415
38.144186
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-sse2-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld128( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) 
((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vsb01 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb01); const __m128i vxb0 = _mm_unpacklo_epi8(vb01, vsb01); const __m128i vxb1 = _mm_unpackhi_epi8(vb01, vsb01); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vsb23 = _mm_cmpgt_epi8(_mm_setzero_si128(), vb23); const __m128i vxb2 = _mm_unpacklo_epi8(vb23, 
vsb23); const __m128i vxb3 = _mm_unpackhi_epi8(vb23, vsb23); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = 
_mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3)))); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)))); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)))); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi16(vout, 6); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c0 = (int8_t) _mm_cvtsi128_si32(vout); } nc = 0; } } while (nc != 0); }
8,760
39.373272
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-sse2-ld64.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <emmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__sse2_ld64( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) 
((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_srai_epi16(_mm_unpacklo_epi8(va0, va0), 8); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_srai_epi16(_mm_unpacklo_epi8(va1, va1), 8); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_srai_epi16(_mm_unpacklo_epi8(va2, va2), 8); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_srai_epi16(_mm_unpacklo_epi8(va3, va3), 8); a3 += 8; const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w); const __m128i vxb0 = _mm_srai_epi16(_mm_unpacklo_epi8(vb0, vb0), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8)); const __m128i vxb1 = _mm_srai_epi16(_mm_unpacklo_epi8(vb1, vb1), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_srai_epi16(_mm_unpacklo_epi8(vb2, vb2), 
8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24)); const __m128i vxb3 = _mm_srai_epi16(_mm_unpacklo_epi8(vb3, vb3), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse2.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse2.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = 
_mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse2.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); const __m128i voutput_min = _mm_load_si128((const __m128i*) params->fp32_sse2.output_min); vacc01x0123 = _mm_max_epi16(vacc01x0123, voutput_min); vacc23x0123 = _mm_max_epi16(vacc23x0123, voutput_min); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(3, 3, 3, 3)))); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(2, 2, 2, 2)))); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_cvtsi128_si32(_mm_shuffle_epi32(vout, _MM_SHUFFLE(1, 1, 1, 1)))); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi16(vout, 6); *c2 = (int8_t) _mm_extract_epi16(vout, 4); *c1 = (int8_t) _mm_extract_epi16(vout, 2); *c0 = (int8_t) _mm_cvtsi128_si32(vout); } nc = 0; } } while (nc != 0); }
8,850
39.788018
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-sse41-ld128.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/MRx4c2s4-sse.c.in // Generator: tools/xngen // // Copyright 2022 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <smmintrin.h> #include <xnnpack/igemm.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld128( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 8 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w); __m128i vacc1x0123 = vacc0x0123; __m128i vacc2x0123 = vacc0x0123; __m128i vacc3x0123 = vacc0x0123; w = (const void*) ((const int32_t*) w + 4); size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) 
((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; do { const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0); __m128i vxa0 = _mm_cvtepi8_epi16(va0); a0 += 8; const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1); __m128i vxa1 = _mm_cvtepi8_epi16(va1); a1 += 8; const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2); __m128i vxa2 = _mm_cvtepi8_epi16(va2); a2 += 8; const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3); __m128i vxa3 = _mm_cvtepi8_epi16(va3); a3 += 8; const __m128i vb01 = _mm_loadu_si128((const __m128i*) w); const __m128i vxb0 = _mm_cvtepi8_epi16(vb01); const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16)); const __m128i vxb2 = _mm_cvtepi8_epi16(vb23); const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2)); vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1)); vacc1x0123 = 
_mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2)); vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2)); vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2)); vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1)); vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3)); vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3)); vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3)); vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3)); w = (const void*) ((const int8_t*) w + 32); k -= 8 * sizeof(int8_t); } while (k != 0); p -= 4 * sizeof(void*); } while (p != 0); __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123); __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123); __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123); __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123); const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale); vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale); vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale); vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale); vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale); const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point); vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point); vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point); vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point); vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point); vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123); vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123); vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123); vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123); const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point); __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), 
voutput_zero_point); __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point); __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123); vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min)); if (nc >= 4) { unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2)); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1)); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout)); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 4; } else { if (nc & 2) { unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6)); c3 += 2; unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4)); c2 += 2; unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2)); c1 += 2; unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0)); c0 += 2; vout = _mm_srli_epi32(vout, 16); } if (nc & 1) { *c3 = (int8_t) _mm_extract_epi8(vout, 12); *c2 = (int8_t) _mm_extract_epi8(vout, 8); *c1 = (int8_t) _mm_extract_epi8(vout, 4); *c0 = (int8_t) _mm_extract_epi8(vout, 0); } nc = 0; } } while (nc != 0); }
8,299
37.967136
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-sse41-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM (IGEMM) micro-kernel: MR=4 rows x NR=4 columns, SSE4.1,
// 64-bit ("ld64") weight loads, fp32 requantization.
// "4c2s4": K is consumed 8 int8 values at a time; each _mm_madd_epi16 pairs
// 2 K-elements (c2), and the activation vector is rotated 4 times (s4) so
// every K pair meets every packed weight group.
//
//   a        - ks*? indirection pointers, mr consumed per iteration
//   w        - packed weights: 4 int32 biases, then 32 int8 weights per k-block
//   zero     - indirection entries equal to `zero` are NOT offset by a_offset
//   params   - fp32_sse4 requantization parameters
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__sse41_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is padded up to a multiple of 8 int8 elements (packed weights match).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // fewer rows than MR: alias the unused row pointers downward
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // All 4 row accumulators start from the same packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next indirection pointer per row; the shared zero buffer
      // is left un-offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 int8 activations per row and sign-extend to 16 bits.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // Weight group 0 (8 int8 weights); after each madd the activation
        // vector is rotated one 32-bit lane so the next group sees the
        // next K pair.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb0));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb0));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb0));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb0));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        // Weight group 1.
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb1));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb1));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb1));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb1));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        // Weight group 2.
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb2));
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb2));
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb2));
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb2));
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        // Weight group 3 — last group, so no trailing rotation is needed.
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        vacc0x0123 = _mm_add_epi32(vacc0x0123, _mm_madd_epi16(vxa0, vxb3));
        vacc1x0123 = _mm_add_epi32(vacc1x0123, _mm_madd_epi16(vxa1, vxb3));
        vacc2x0123 = _mm_add_epi32(vacc2x0123, _mm_madd_epi16(vxa2, vxb3));
        vacc3x0123 = _mm_add_epi32(vacc3x0123, _mm_madd_epi16(vxa3, vxb3));

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp above (pre-zero-point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Add the output zero point with int16 saturation, narrow to int8,
    // then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store, highest row first.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tail: write 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);  // expose the odd column in the low bytes
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
8,417
38.153488
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM micro-kernel: MR=4 x NR=4, WebAssembly SIMD, int16
// dot-product accumulation (dot16x2), 128-bit ("ld128") weight loads,
// fp32 requantization via the magic-bias trick.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // alias unused row pointers downward when mr < MR
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  // K is padded up to a multiple of 8 int8 elements (packed weights match).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // All 4 row accumulators start from the same packed int32 biases.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next indirection pointer per row; the shared zero buffer is left
      // un-offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 int8 activations per row, sign-extended to 16 bits.
        v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;
        v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 += 8;

        // One 128-bit load yields weight groups 0 and 1 (low/high halves,
        // sign-extended). After each dot product the activation vector is
        // rotated one 32-bit lane.
        const v128_t vb01 = wasm_v128_load(w);
        const v128_t vxb0 = wasm_i16x8_extend_low_i8x16(vb01);
        const v128_t vxb1 = wasm_i16x8_extend_high_i8x16(vb01);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

        // Second 128-bit load yields weight groups 2 and 3; no rotation
        // after the final group.
        const v128_t vb23 = wasm_v128_load((const int8_t*) w + 16);
        const v128_t vxb2 = wasm_i16x8_extend_low_i8x16(vb23);
        const v128_t vxb3 = wasm_i16x8_extend_high_i8x16(vb23);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));

        w = (const int8_t*) w + 32;
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Magic-bias fp32 requantization: scale, add magic bias (fixes the
    // rounding/offset in the float bit pattern), clamp below via integer
    // max, then subtract (magic bias - output zero point) as integers.
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> int8 with saturation, then apply the upper
    // clamp (the lower clamp was handled by vmagic_min above).
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store per row, then advance all row pointers.
      wasm_v128_store32_lane(c3, vout, 3);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tail: 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;

        vout = wasm_u32x4_shr(vout, 16);  // expose the odd column in the low bytes
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c3, vout, 12);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,115
36.574074
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-wasmsimd-dot16x2-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>

// QS8 indirect GEMM micro-kernel: MR=4 x NR=4, WebAssembly SIMD, int16
// dot-product accumulation (dot16x2), 64-bit ("ld64") weight loads,
// fp32 requantization via the magic-bias trick.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // alias unused row pointers downward when mr < MR
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  // K is padded up to a multiple of 8 int8 elements (packed weights match).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // All 4 row accumulators start from the same packed int32 biases.
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next indirection pointer per row; the shared zero buffer is left
      // un-offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 int8 activations per row, sign-extended to 16 bits.
        v128_t vxa0 = wasm_i16x8_load8x8(a0);
        a0 += 8;
        v128_t vxa1 = wasm_i16x8_load8x8(a1);
        a1 += 8;
        v128_t vxa2 = wasm_i16x8_load8x8(a2);
        a2 += 8;
        v128_t vxa3 = wasm_i16x8_load8x8(a3);
        a3 += 8;

        // Weight group 0 (8 int8 weights, sign-extended on load). After
        // each dot product the activation vector is rotated one 32-bit
        // lane so the next group sees the next K pair.
        const v128_t vxb0 = wasm_i16x8_load8x8(w);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb0));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb0));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb0));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb0));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
        // Weight group 1.
        const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb1));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb1));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb1));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb1));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
        // Weight group 2.
        const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb2));
        vxa0 = wasm_v32x4_shuffle(vxa0, vxa0, 1, 2, 3, 4);
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb2));
        vxa1 = wasm_v32x4_shuffle(vxa1, vxa1, 1, 2, 3, 4);
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb2));
        vxa2 = wasm_v32x4_shuffle(vxa2, vxa2, 1, 2, 3, 4);
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb2));
        vxa3 = wasm_v32x4_shuffle(vxa3, vxa3, 1, 2, 3, 4);
        // Weight group 3 — last group, so no trailing rotation is needed.
        const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123, wasm_i32x4_dot_i16x8(vxa0, vxb3));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123, wasm_i32x4_dot_i16x8(vxa1, vxb3));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123, wasm_i32x4_dot_i16x8(vxa2, vxb3));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123, wasm_i32x4_dot_i16x8(vxa3, vxb3));

        w = (const int8_t*) w + 32;
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Magic-bias fp32 requantization: scale, add magic bias, clamp below
    // via integer max, subtract (magic bias - output zero point).
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale = wasm_v128_load64_splat(params->fp32_wasmsimd.scale);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

    // Narrow int32 -> int16 -> int8 with saturation, then upper clamp
    // (the lower clamp was handled by vmagic_min above).
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

    if (nc >= 4) {
      // Full 4-column store per row, then advance all row pointers.
      wasm_v128_store32_lane(c3, vout, 3);
      wasm_v128_store32_lane(c2, vout, 2);
      wasm_v128_store32_lane(c1, vout, 1);
      wasm_v128_store32_lane(c0, vout, 0);

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tail: 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        wasm_v128_store16_lane(c3, vout, 6);
        c3 += 2;
        wasm_v128_store16_lane(c2, vout, 4);
        c2 += 2;
        wasm_v128_store16_lane(c1, vout, 2);
        c1 += 2;
        wasm_v128_store16_lane(c0, vout, 0);
        c0 += 2;

        vout = wasm_u32x4_shr(vout, 16);  // expose the odd column in the low bytes
      }
      if (nc & 1) {
        wasm_v128_store8_lane(c3, vout, 12);
        wasm_v128_store8_lane(c2, vout, 8);
        wasm_v128_store8_lane(c1, vout, 4);
        wasm_v128_store8_lane(c0, vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,011
36.439252
134
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-xop-ld128.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 indirect GEMM micro-kernel: MR=4 x NR=4, AMD XOP (fused
// multiply-add-accumulate via _mm_maddd_epi16), 128-bit ("ld128") weight
// loads, fp32 requantization.
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__xop_ld128(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is padded up to a multiple of 8 int8 elements (packed weights match).
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;  // alias unused row pointers downward when mr < MR
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // All 4 row accumulators start from the same packed int32 biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Next indirection pointer per row; the shared zero buffer is left
      // un-offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 int8 activations per row and sign-extend to 16 bits.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // One 128-bit load yields weight groups 0 and 1: group 0 via
        // sign-extension of the low half, group 1 via unpack-high +
        // arithmetic shift. XOP maddd fuses multiply-add-accumulate.
        // After each maddd the activation vector is rotated one 32-bit
        // lane so the next group sees the next K pair.
        const __m128i vb01 = _mm_loadu_si128((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb01);
        const __m128i vxb1 = _mm_srai_epi16(_mm_unpackhi_epi8(vb01, vb01), 8);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        // Second 128-bit load yields weight groups 2 and 3; no rotation
        // after the final group.
        const __m128i vb23 = _mm_loadu_si128((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb23);
        const __m128i vxb3 = _mm_srai_epi16(_mm_unpackhi_epi8(vb23, vb23), 8);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: scale in float, clamp above (pre-zero-point),
    // round-convert back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Add the output zero point with int16 saturation, narrow to int8,
    // then apply the lower clamp.
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column store, highest row first.
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Partial tail: write 2 and/or 1 remaining columns per row.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);  // expose the odd column in the low bytes
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }
      nc = 0;
    }
  } while (nc != 0);
}
8,126
36.451613
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x4c2s4-minmax-fp32-xop-ld64.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/MRx4c2s4-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#ifdef _MSC_VER
  #include <intrin.h>
#else
  #include <x86intrin.h>
#endif

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


// QS8 indirect GEMM (IGEMM) microkernel, 4 output rows (MR=4) x 4 output
// columns (NR=4), with fp32 requantization, using the AMD XOP
// _mm_maddd_epi16 multiply-add-accumulate intrinsic.
//
// Parameters:
//   mr        - number of output rows actually computed (1..4).
//   nc        - number of output columns remaining.
//   kc        - number of input channels (bytes per input pointer);
//               rounded up to a multiple of 8 below, so the packed
//               weights/inputs must be padded accordingly.
//   ks        - total size, in bytes, of the indirection-pointer groups
//               consumed per output tile; must be a multiple of
//               4 * sizeof(void*) (4 pointers per group, one per row).
//   a         - indirection buffer of input pointers; entries equal to
//               `zero` point at a zero buffer and are NOT offset by
//               a_offset (see the XNN_UNPREDICTABLE checks below).
//   w         - packed weights: per output-column group, 4 int32 biases
//               followed by 32-byte int8 weight groups (8 bytes per
//               column per k-step).
//   c         - output pointer; rows are cm_stride apart, column tiles
//               advance by cn_stride.
//   params    - fp32_sse4 requantization constants (scale, clamp bounds,
//               output zero point).
void xnn_qs8_igemm_minmax_fp32_ukernel_4x4c2s4__xop_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  // Row output pointers; out-of-range rows alias the previous row so the
  // kernel can always compute 4 rows and the extra stores are harmless
  // overwrites of valid rows.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 row accumulators from the packed per-column biases.
    __m128i vacc0x0123 = _mm_loadu_si128((const __m128i*) w);
    __m128i vacc1x0123 = vacc0x0123;
    __m128i vacc2x0123 = vacc0x0123;
    __m128i vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next group of 4 indirection pointers (one per row).
      // Pointers equal to `zero` reference the shared zero buffer and
      // must not be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 int8 inputs per row and sign-extend to int16.
        const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
        __m128i vxa0 = _mm_cvtepi8_epi16(va0);
        a0 += 8;
        const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
        __m128i vxa1 = _mm_cvtepi8_epi16(va1);
        a1 += 8;
        const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
        __m128i vxa2 = _mm_cvtepi8_epi16(va2);
        a2 += 8;
        const __m128i va3 = _mm_loadl_epi64((const __m128i*) a3);
        __m128i vxa3 = _mm_cvtepi8_epi16(va3);
        a3 += 8;

        // Four weight groups of 8 int8 values each; after each
        // multiply-add the input vector is rotated by 32 bits
        // (_mm_shuffle_epi32) so the next weight group multiplies the
        // next pair of input channels.
        const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
        const __m128i vxb0 = _mm_cvtepi8_epi16(vb0);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb0, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb0, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb0, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb0, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 8));
        const __m128i vxb1 = _mm_cvtepi8_epi16(vb1);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb1, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb1, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb1, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb1, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 16));
        const __m128i vxb2 = _mm_cvtepi8_epi16(vb2);

        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb2, vacc0x0123);
        vxa0 = _mm_shuffle_epi32(vxa0, _MM_SHUFFLE(0, 3, 2, 1));
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb2, vacc1x0123);
        vxa1 = _mm_shuffle_epi32(vxa1, _MM_SHUFFLE(0, 3, 2, 1));
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb2, vacc2x0123);
        vxa2 = _mm_shuffle_epi32(vxa2, _MM_SHUFFLE(0, 3, 2, 1));
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb2, vacc3x0123);
        vxa3 = _mm_shuffle_epi32(vxa3, _MM_SHUFFLE(0, 3, 2, 1));
        const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((const int8_t*) w + 24));
        const __m128i vxb3 = _mm_cvtepi8_epi16(vb3);

        // Last weight group: no rotation needed afterwards.
        vacc0x0123 = _mm_maddd_epi16(vxa0, vxb3, vacc0x0123);
        vacc1x0123 = _mm_maddd_epi16(vxa1, vxb3, vacc1x0123);
        vacc2x0123 = _mm_maddd_epi16(vxa2, vxb3, vacc2x0123);
        vacc3x0123 = _mm_maddd_epi16(vxa3, vxb3, vacc3x0123);

        w = (const void*) ((const int8_t*) w + 32);
        k -= 8 * sizeof(int8_t);
      } while (k != 0);
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // fp32 requantization: int32 accumulator -> float, scale, clamp to the
    // upper bound (expressed as max - zero_point), round back to int32.
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);
    __m128 vscaled3x0123 = _mm_cvtepi32_ps(vacc3x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);
    vscaled3x0123 = _mm_mul_ps(vscaled3x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);
    vscaled3x0123 = _mm_min_ps(vscaled3x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);
    vacc3x0123 = _mm_cvtps_epi32(vscaled3x0123);

    // Add the output zero point with saturation, then narrow
    // int32 -> int16 -> int8 (saturating packs).
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc23x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc3x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc23x0123);

    // Lower clamp (the upper clamp was applied in float above).
    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

    if (nc >= 4) {
      // Full 4-column tile: one 32-bit store per row (rows are packed as
      // 4-byte lanes of vout, row 0 in lane 0 .. row 3 in lane 3).
      unaligned_store_u32(c3, (uint32_t) _mm_extract_epi32(vout, 3));
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      unaligned_store_u32(c2, (uint32_t) _mm_extract_epi32(vout, 2));
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      unaligned_store_u32(c1, (uint32_t) _mm_extract_epi32(vout, 1));
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      unaligned_store_u32(c0, (uint32_t) _mm_cvtsi128_si32(vout));
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Remainder columns (1..3): store 2 bytes, then shift, then 1 byte.
      if (nc & 2) {
        unaligned_store_u16(c3, (uint16_t) _mm_extract_epi16(vout, 6));
        c3 += 2;
        unaligned_store_u16(c2, (uint16_t) _mm_extract_epi16(vout, 4));
        c2 += 2;
        unaligned_store_u16(c1, (uint16_t) _mm_extract_epi16(vout, 2));
        c1 += 2;
        unaligned_store_u16(c0, (uint16_t) _mm_extract_epi16(vout, 0));
        c0 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c3 = (int8_t) _mm_extract_epi8(vout, 12);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
8,244
36.648402
108
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c16-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c16-neon-mlal.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel, 4 output rows (MR=4) x 8 output
// columns (NR=8), processing KC in steps of 16 int8 values, with rndnu
// (rounding-shift) requantization, using NEON vmull_s8/vmlal_s8.
//
// Each of the 32 per-(row,column) accumulators (vacc{row}x{col}) holds
// 4 partial int32 sums that are horizontally reduced after the K loop.
//
// Parameters:
//   mr        - number of output rows actually computed (1..4).
//   nc        - number of output columns remaining.
//   kc        - number of input channels (bytes per input pointer);
//               rounded up to a multiple of 16 below.
//   ks        - total size, in bytes, of the indirection-pointer groups
//               consumed per output tile; must be a multiple of
//               4 * sizeof(void*) (4 pointers per group, one per row).
//   a         - indirection buffer of input pointers; entries equal to
//               `zero` are NOT offset by a_offset.
//   w         - packed weights: 8 int32 biases, then 16-byte int8 weight
//               vectors (one per output column per k-step).
//   params    - rndnu_neon requantization constants.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c16__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  kc = round_up_po2(kc, 16 * sizeof(int8_t));
  // Row output pointers; out-of-range rows alias the previous row so the
  // kernel can always compute 4 rows and the extra stores are harmless.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Seed each per-column accumulator with its bias in lane 0
    // (the remaining lanes start at zero and are folded in by the
    // horizontal reduction after the K loop).
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t p = ks;
    do {
      // Fetch the next group of 4 indirection pointers; pointers equal to
      // `zero` reference the shared zero buffer and are not offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // KC loop of 16 with up to 15 remainder
      size_t k = kc;
      while (k != 0) {
        const int8x16_t va0 = vld1q_s8(a0); a0 += 16;
        const int8x16_t va1 = vld1q_s8(a1); a1 += 16;
        const int8x16_t va2 = vld1q_s8(a2); a2 += 16;
        const int8x16_t va3 = vld1q_s8(a3); a3 += 16;

        // One 16-byte weight vector per output column.
        const int8x16_t vb0 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb1 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb2 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb3 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb4 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb5 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb6 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));
        const int8x16_t vb7 = vld1q_s8(w); w = (const void*) ((uintptr_t) w + 16 * sizeof(int8_t));

        // Per column: widening multiply of the low 8 bytes (vmull_s8),
        // multiply-accumulate of the high 8 bytes (vmlal_s8), then
        // pairwise-add the int16 products into the int32 accumulator
        // (vpadalq_s16).
        int16x8_t vprod0x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va0));
        int16x8_t vprod1x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va1));
        int16x8_t vprod2x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va2));
        int16x8_t vprod3x0 = vmull_s8(vget_low_s8(vb0), vget_low_s8(va3));
        vprod0x0 = vmlal_s8(vprod0x0, vget_high_s8(vb0), vget_high_s8(va0));
        vprod1x0 = vmlal_s8(vprod1x0, vget_high_s8(vb0), vget_high_s8(va1));
        vprod2x0 = vmlal_s8(vprod2x0, vget_high_s8(vb0), vget_high_s8(va2));
        vprod3x0 = vmlal_s8(vprod3x0, vget_high_s8(vb0), vget_high_s8(va3));
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
        int16x8_t vprod0x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va0));
        int16x8_t vprod1x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va1));
        int16x8_t vprod2x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va2));
        int16x8_t vprod3x1 = vmull_s8(vget_low_s8(vb1), vget_low_s8(va3));
        vprod0x1 = vmlal_s8(vprod0x1, vget_high_s8(vb1), vget_high_s8(va0));
        vprod1x1 = vmlal_s8(vprod1x1, vget_high_s8(vb1), vget_high_s8(va1));
        vprod2x1 = vmlal_s8(vprod2x1, vget_high_s8(vb1), vget_high_s8(va2));
        vprod3x1 = vmlal_s8(vprod3x1, vget_high_s8(vb1), vget_high_s8(va3));
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
        int16x8_t vprod0x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va0));
        int16x8_t vprod1x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va1));
        int16x8_t vprod2x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va2));
        int16x8_t vprod3x2 = vmull_s8(vget_low_s8(vb2), vget_low_s8(va3));
        vprod0x2 = vmlal_s8(vprod0x2, vget_high_s8(vb2), vget_high_s8(va0));
        vprod1x2 = vmlal_s8(vprod1x2, vget_high_s8(vb2), vget_high_s8(va1));
        vprod2x2 = vmlal_s8(vprod2x2, vget_high_s8(vb2), vget_high_s8(va2));
        vprod3x2 = vmlal_s8(vprod3x2, vget_high_s8(vb2), vget_high_s8(va3));
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
        int16x8_t vprod0x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va0));
        int16x8_t vprod1x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va1));
        int16x8_t vprod2x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va2));
        int16x8_t vprod3x3 = vmull_s8(vget_low_s8(vb3), vget_low_s8(va3));
        vprod0x3 = vmlal_s8(vprod0x3, vget_high_s8(vb3), vget_high_s8(va0));
        vprod1x3 = vmlal_s8(vprod1x3, vget_high_s8(vb3), vget_high_s8(va1));
        vprod2x3 = vmlal_s8(vprod2x3, vget_high_s8(vb3), vget_high_s8(va2));
        vprod3x3 = vmlal_s8(vprod3x3, vget_high_s8(vb3), vget_high_s8(va3));
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
        int16x8_t vprod0x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va0));
        int16x8_t vprod1x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va1));
        int16x8_t vprod2x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va2));
        int16x8_t vprod3x4 = vmull_s8(vget_low_s8(vb4), vget_low_s8(va3));
        vprod0x4 = vmlal_s8(vprod0x4, vget_high_s8(vb4), vget_high_s8(va0));
        vprod1x4 = vmlal_s8(vprod1x4, vget_high_s8(vb4), vget_high_s8(va1));
        vprod2x4 = vmlal_s8(vprod2x4, vget_high_s8(vb4), vget_high_s8(va2));
        vprod3x4 = vmlal_s8(vprod3x4, vget_high_s8(vb4), vget_high_s8(va3));
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
        int16x8_t vprod0x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va0));
        int16x8_t vprod1x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va1));
        int16x8_t vprod2x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va2));
        int16x8_t vprod3x5 = vmull_s8(vget_low_s8(vb5), vget_low_s8(va3));
        vprod0x5 = vmlal_s8(vprod0x5, vget_high_s8(vb5), vget_high_s8(va0));
        vprod1x5 = vmlal_s8(vprod1x5, vget_high_s8(vb5), vget_high_s8(va1));
        vprod2x5 = vmlal_s8(vprod2x5, vget_high_s8(vb5), vget_high_s8(va2));
        vprod3x5 = vmlal_s8(vprod3x5, vget_high_s8(vb5), vget_high_s8(va3));
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
        int16x8_t vprod0x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va0));
        int16x8_t vprod1x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va1));
        int16x8_t vprod2x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va2));
        int16x8_t vprod3x6 = vmull_s8(vget_low_s8(vb6), vget_low_s8(va3));
        vprod0x6 = vmlal_s8(vprod0x6, vget_high_s8(vb6), vget_high_s8(va0));
        vprod1x6 = vmlal_s8(vprod1x6, vget_high_s8(vb6), vget_high_s8(va1));
        vprod2x6 = vmlal_s8(vprod2x6, vget_high_s8(vb6), vget_high_s8(va2));
        vprod3x6 = vmlal_s8(vprod3x6, vget_high_s8(vb6), vget_high_s8(va3));
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
        int16x8_t vprod0x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va0));
        int16x8_t vprod1x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va1));
        int16x8_t vprod2x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va2));
        int16x8_t vprod3x7 = vmull_s8(vget_low_s8(vb7), vget_low_s8(va3));
        vprod0x7 = vmlal_s8(vprod0x7, vget_high_s8(vb7), vget_high_s8(va0));
        vprod1x7 = vmlal_s8(vprod1x7, vget_high_s8(vb7), vget_high_s8(va1));
        vprod2x7 = vmlal_s8(vprod2x7, vget_high_s8(vb7), vget_high_s8(va2));
        vprod3x7 = vmlal_s8(vprod3x7, vget_high_s8(vb7), vget_high_s8(va3));
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
        vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

        k -= 16 * sizeof(int8_t);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Horizontally reduce the 4 lanes of each per-column accumulator into
    // one int32 per column, producing vacc{row}x0123 / vacc{row}x4567.
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    // AArch32 has no vpaddq_s32; emulate with vadd_s32/vpadd_s32 on halves.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23 );
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67 );
#endif

    // rndnu requantization: saturating pre-shift, doubling-high multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow to int16, add the output zero point (saturating), then narrow
    // to int8. ARM64 uses the *_high narrowing forms; AArch32 combines
    // halves explicitly.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: rows 0/2 are the low halves, rows 1/3 the high
      // halves of the two packed output vectors.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder columns (1..7): store 4, 2, then 1 byte per row,
      // rotating the output vectors (vextq_s8) after each partial store.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
23,034
54.506024
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_dup( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if 
XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int8x8_t va0 = vld1_s8(a0); a0 += 8; const int8x8_t va1 = vld1_s8(a1); a1 += 8; const int8x8_t va2 = vld1_s8(a2); a2 += 8; const int8x8_t va3 = vld1_s8(a3); a3 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); const 
int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int8x8_t va3c2 = 
vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 3)); const int8x8_t va1c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 3)); const int8x8_t va2c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 3)); const int8x8_t va3c3 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 3)); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3); const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3); const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3); 
vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0); vacc3x4567 = vpadalq_s16(vacc3x4567, 
vprod3x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1)); const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, 
vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2)); const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); } } } p -= 4 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift); vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, 
vmultiplier); vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier); vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift); vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, 
voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c2, 
vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
20,823
52.670103
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2-minmax-rndnu-neon-mull-ld1r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld1r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 2 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t)); int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if 
XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int16x4_t va00 = vld1_dup_s16((const void*)a0); const int16x4_t va01 = vld1_dup_s16((const void*)(a0 + 2)); const int16x4_t va02 = vld1_dup_s16((const void*)(a0 + 4)); const int16x4_t va03 = vld1_dup_s16((const void*)(a0 + 6)); a0 += 8; const int16x4_t va10 = vld1_dup_s16((const void*)a1); const int16x4_t va11 = vld1_dup_s16((const void*)(a1 + 2)); const int16x4_t va12 = vld1_dup_s16((const void*)(a1 + 4)); const int16x4_t va13 = vld1_dup_s16((const void*)(a1 + 6)); a1 += 8; const int16x4_t va20 = vld1_dup_s16((const void*)a2); const int16x4_t va21 = vld1_dup_s16((const void*)(a2 + 2)); const int16x4_t va22 = vld1_dup_s16((const void*)(a2 + 4)); const int16x4_t va23 = vld1_dup_s16((const void*)(a2 + 6)); a2 += 8; const int16x4_t va30 = vld1_dup_s16((const void*)a3); const int16x4_t va31 = vld1_dup_s16((const void*)(a3 + 2)); const int16x4_t va32 = vld1_dup_s16((const void*)(a3 + 4)); const int16x4_t va33 = vld1_dup_s16((const void*)(a3 + 6)); a3 += 8; const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const 
int8x8_t va0c0 = vreinterpret_s8_s16(va00); const int8x8_t va1c0 = vreinterpret_s8_s16(va10); const int8x8_t va2c0 = vreinterpret_s8_s16(va20); const int8x8_t va3c0 = vreinterpret_s8_s16(va30); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); const int8x8_t va0c1 = vreinterpret_s8_s16(va01); const int8x8_t va1c1 = vreinterpret_s8_s16(va11); const int8x8_t va2c1 = vreinterpret_s8_s16(va21); const int8x8_t va3c1 = vreinterpret_s8_s16(va31); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1); vacc0x4567 = 
vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); const int8x8_t va0c2 = vreinterpret_s8_s16(va02); const int8x8_t va1c2 = vreinterpret_s8_s16(va12); const int8x8_t va2c2 = vreinterpret_s8_s16(va22); const int8x8_t va3c2 = vreinterpret_s8_s16(va32); const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); const int8x8_t va0c3 = vreinterpret_s8_s16(va03); const int8x8_t va1c3 = vreinterpret_s8_s16(va13); const int8x8_t va2c3 = vreinterpret_s8_s16(va23); const int8x8_t va3c3 = vreinterpret_s8_s16(va33); const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3); const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3); const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3); const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3); const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, 
va0c3); const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3); const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3); const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k); const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0)); const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0)); const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0)); const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0)); const int16x8_t vprod3x0123c0 
= vmull_s8(vb0123c0, va3c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); if (k > 2 * sizeof(int8_t)) { const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1)); const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1)); const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1)); const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1)); const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); if (k > 4 * sizeof(int8_t)) { const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2)); const int16x8_t vprod0x0123c2 = 
vmull_s8(vb0123c2, va0c2); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2)); const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2)); const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2)); const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); } } } p -= 4 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift); vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = 
vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier); vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift); vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); 
vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); 
} if (nc & 2) { vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
21,112
51.7825
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.


#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel, rndnu requantization, NEON VMULL.
//
// Tile: MR=4 rows x NR=8 columns, C2 packing (weights grouped by pairs of K),
// "ld2r" activation loading (vld2_dup de-interleaves 16-bit pairs with lane
// duplication). Accumulates int8 products into int32 via vmull_s8 + vpadalq_s16.
//
// Parameters:
//   mr/nc/kc  - tile rows actually used, output columns, reduction depth (bytes).
//   ks        - size of the indirection buffer slice per tile (bytes of void*).
//   a         - indirection buffer: ks/sizeof(void*) pointers per output pixel.
//   w         - packed weights: per-NR-group int32 biases followed by int8 weights.
//   c         - output, cm_stride between rows, cn_stride between column groups.
//   a_offset  - byte offset applied to every non-zero activation pointer.
//   zero      - pointer to a zero buffer; entries of `a` equal to it are NOT
//               offset (used for padding pixels).
//   params    - rndnu requantization constants (shifts, multiplier, zero point,
//               output clamp bounds).
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld2r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // C2 packing: the reduction dimension is processed in pairs of int8.
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Output row pointers; rows beyond `mr` alias the previous row so the
  // kernel can unconditionally store MR rows (extra stores are harmless dups).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 rows of accumulators from the packed per-column biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next activation pointer per row from the indirection buffer;
      // pointers equal to `zero` are padding and must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;

      // Main loop: 8 bytes of K per iteration (4 pairs c0..c3).
      // vld2_dup_s16 loads 2 de-interleaved 16-bit lanes (each an int8 pair)
      // broadcast across the 4 lanes of each int16x4, so .val[0]/.val[1]
      // reinterpreted as int8x8 hold a duplicated K-pair ready for vmull_s8.
      while (k >= 8 * sizeof(int8_t)) {
        const int16x4x2_t va00 = vld2_dup_s16((const void*)a0);
        const int16x4x2_t va01 = vld2_dup_s16((const void*)(a0 + 4)); a0 += 8;
        const int16x4x2_t va10 = vld2_dup_s16((const void*)a1);
        const int16x4x2_t va11 = vld2_dup_s16((const void*)(a1 + 4)); a1 += 8;
        const int16x4x2_t va20 = vld2_dup_s16((const void*)a2);
        const int16x4x2_t va21 = vld2_dup_s16((const void*)(a2 + 4)); a2 += 8;
        const int16x4x2_t va30 = vld2_dup_s16((const void*)a3);
        const int16x4x2_t va31 = vld2_dup_s16((const void*)(a3 + 4)); a3 += 8;

        // 8 weight vectors: columns 0-3 and 4-7 for each of the 4 K-pairs.
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // K-pair c0: multiply int8 pairs, pairwise-add the int16 products
        // into the int32 accumulators (vpadalq_s16 sums each product pair).
        const int8x8_t va0c0 = vreinterpret_s8_s16(va00.val[0]);
        const int8x8_t va1c0 = vreinterpret_s8_s16(va10.val[0]);
        const int8x8_t va2c0 = vreinterpret_s8_s16(va20.val[0]);
        const int8x8_t va3c0 = vreinterpret_s8_s16(va30.val[0]);

        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

        // K-pair c1 (second de-interleaved lane of the first vld2).
        const int8x8_t va0c1 = vreinterpret_s8_s16(va00.val[1]);
        const int8x8_t va1c1 = vreinterpret_s8_s16(va10.val[1]);
        const int8x8_t va2c1 = vreinterpret_s8_s16(va20.val[1]);
        const int8x8_t va3c1 = vreinterpret_s8_s16(va30.val[1]);

        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

        // K-pair c2 (first lane of the second vld2).
        const int8x8_t va0c2 = vreinterpret_s8_s16(va01.val[0]);
        const int8x8_t va1c2 = vreinterpret_s8_s16(va11.val[0]);
        const int8x8_t va2c2 = vreinterpret_s8_s16(va21.val[0]);
        const int8x8_t va3c2 = vreinterpret_s8_s16(va31.val[0]);

        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);

        // K-pair c3 (second lane of the second vld2).
        const int8x8_t va0c3 = vreinterpret_s8_s16(va01.val[1]);
        const int8x8_t va1c3 = vreinterpret_s8_s16(va11.val[1]);
        const int8x8_t va2c3 = vreinterpret_s8_s16(va21.val[1]);
        const int8x8_t va3c3 = vreinterpret_s8_s16(va31.val[1]);

        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

        k -= 8 * sizeof(int8_t);
      }

      // Remainder: 2/4/6 leftover K bytes. Loads 8 bytes per row (the kernel
      // is marked XNN_OOB_READS) but only consumes k of them; lanes are
      // broadcast with vdup_lane_s16 instead of vld2_dup.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // First leftover K-pair (lane 0), all 4 rows.
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

        // Second leftover K-pair (lane 1), only when k > 2 bytes.
        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

          // Third leftover K-pair (lane 2), only when k > 4 bytes.
          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, doubling-high multiply,
    // rounding post-shift (all per the params constants).
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Saturating narrow int32 -> int16 (adding the output zero point) ->
    // int8; ARM64 uses the vqmovn_high forms, ARMv7 falls back to vcombine.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    // Store: full 8-column tile, then rewind the indirection buffer for the
    // next column group; otherwise write the 4/2/1-byte tail per row
    // (rows stored in reverse order, c3 first, matching IGEMM convention).
    if (nc >= 8) {
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,696
51.798469
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2-minmax-rndnu-neon-mull-ld4r.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect-GEMM (igemm) micro-kernel: computes a tile of up to 4 rows x
// 8 columns of output.  The A matrix is supplied indirectly through the
// pointer array `a` (`ks` pointers per output pixel, in groups of mr=4);
// entries equal to `zero` select the zero buffer and are NOT offset by
// `a_offset`.  K is processed 2 int8 elements at a time ("c2"); activations
// are loaded with vld4_dup ("ld4r") so each 2-element group is broadcast
// across a 64-bit lane for vmull_s8 against 8 packed weight columns.
// Requantization uses the "rndnu" scheme: saturating pre-shift, doubling
// multiply-high, rounding post-shift, zero-point add, saturating narrow,
// then min/max clamp.  XNN_OOB_READS: the kernel may read (but never write)
// slightly past buffer ends.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2__neon_mull_ld4r(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);  // ks pointers come in groups of mr=4
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Round K up to a whole number of 2-element groups (the packed weights are
  // padded to match).
  kc = round_up_po2(kc, 2 * sizeof(int8_t));
  // Output row pointers; rows beyond mr alias the previous row so stores are
  // harmlessly duplicated instead of branching.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize all 4 rows of accumulators from the packed bias (first
    // 8 int32 values of each column block of w).
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const void*) ((uintptr_t) w + 4 * sizeof(int32_t));
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next group of 4 row pointers.  Pointers equal to `zero`
      // reference the shared zero buffer and must not be offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;

      // Main loop: 8 K elements (four 2-element groups c0..c3) per iteration.
      // vld4_dup_s16 reads 8 bytes and de-interleaves them into four 16-bit
      // lanes, each broadcast across a 64-bit vector — one broadcast per
      // 2-element group.
      while (k >= 8 * sizeof(int8_t)) {
        const int16x4x4_t va0 = vld4_dup_s16((const void*)a0); a0 += 8;
        const int16x4x4_t va1 = vld4_dup_s16((const void*)a1); a1 += 8;
        const int16x4x4_t va2 = vld4_dup_s16((const void*)a2); a2 += 8;
        const int16x4x4_t va3 = vld4_dup_s16((const void*)a3); a3 += 8;

        // 8 weight vectors: columns 0-3 and 4-7 for each of the 4 K-groups.
        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb0123c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // K-group c0: widening multiply (vmull_s8) then pairwise add-accumulate
        // (vpadalq_s16) sums each 2-element group into a 32-bit lane.
        const int8x8_t va0c0 = vreinterpret_s8_s16(va0.val[0]);
        const int8x8_t va1c0 = vreinterpret_s8_s16(va1.val[0]);
        const int8x8_t va2c0 = vreinterpret_s8_s16(va2.val[0]);
        const int8x8_t va3c0 = vreinterpret_s8_s16(va3.val[0]);

        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
        // K-group c1.
        const int8x8_t va0c1 = vreinterpret_s8_s16(va0.val[1]);
        const int8x8_t va1c1 = vreinterpret_s8_s16(va1.val[1]);
        const int8x8_t va2c1 = vreinterpret_s8_s16(va2.val[1]);
        const int8x8_t va3c1 = vreinterpret_s8_s16(va3.val[1]);

        const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
        const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
        const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
        const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
        const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
        const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
        const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
        // K-group c2.
        const int8x8_t va0c2 = vreinterpret_s8_s16(va0.val[2]);
        const int8x8_t va1c2 = vreinterpret_s8_s16(va1.val[2]);
        const int8x8_t va2c2 = vreinterpret_s8_s16(va2.val[2]);
        const int8x8_t va3c2 = vreinterpret_s8_s16(va3.val[2]);

        const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
        const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
        const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
        const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
        const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
        const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
        const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        // K-group c3.
        const int8x8_t va0c3 = vreinterpret_s8_s16(va0.val[3]);
        const int8x8_t va1c3 = vreinterpret_s8_s16(va1.val[3]);
        const int8x8_t va2c3 = vreinterpret_s8_s16(va2.val[3]);
        const int8x8_t va3c3 = vreinterpret_s8_s16(va3.val[3]);

        const int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3, va0c3);
        const int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3, va1c3);
        const int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3, va2c3);
        const int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3, va3c3);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        const int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3, va0c3);
        const int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3, va1c3);
        const int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3, va2c3);
        const int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3, va3c3);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 2, 4 or 6 K elements left (kc is rounded to a multiple
      // of 2, so at most 3 groups).  Broadcast each 2-element group with
      // vdup_lane_s16; the loads may read past the valid K range (covered
      // by XNN_OOB_READS), but only in-range lanes are used.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const int8x8_t vb0123c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb4567c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Group c0 (always present when k != 0).
        const int8x8_t va0c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 0));
        const int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0, va0c0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        const int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0, va0c0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        const int8x8_t va1c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 0));
        const int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0, va1c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        const int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0, va1c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        const int8x8_t va2c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 0));
        const int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0, va2c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        const int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0, va2c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        const int8x8_t va3c0 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 0));
        const int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0, va3c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        const int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0, va3c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);

        // Group c1 (k >= 4 remaining elements).
        if (k > 2 * sizeof(int8_t)) {
          const int8x8_t vb0123c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
          const int8x8_t vb4567c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

          const int8x8_t va0c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 1));
          const int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1, va0c1);
          vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
          const int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1, va0c1);
          vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
          const int8x8_t va1c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 1));
          const int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1, va1c1);
          vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
          const int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1, va1c1);
          vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
          const int8x8_t va2c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 1));
          const int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1, va2c1);
          vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
          const int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1, va2c1);
          vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
          const int8x8_t va3c1 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 1));
          const int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1, va3c1);
          vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
          const int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1, va3c1);
          vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);

          // Group c2 (k == 6 remaining elements).
          if (k > 4 * sizeof(int8_t)) {
            const int8x8_t vb0123c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
            const int8x8_t vb4567c2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

            const int8x8_t va0c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va0), 2));
            const int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2, va0c2);
            vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
            const int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2, va0c2);
            vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
            const int8x8_t va1c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va1), 2));
            const int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2, va1c2);
            vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
            const int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2, va1c2);
            vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
            const int8x8_t va2c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va2), 2));
            const int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2, va2c2);
            vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
            const int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2, va2c2);
            vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
            const int8x8_t va3c2 = vreinterpret_s8_s16(vdup_lane_s16(vreinterpret_s16_s8(va3), 2));
            const int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2, va3c2);
            vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
            const int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2, va3c2);
            vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
          }
        }
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Requantization (rndnu): saturating pre-shift, saturating doubling
    // multiply returning high half, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Add output zero point and saturate-narrow 32 -> 16 -> 8 bits.
    // AArch64 has the *_high narrowing forms; AArch32 uses vcombine instead.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse order (c3..c0), rewind the
      // indirection buffer by ks for the next column block.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile (nc < 8): store 4, then 2, then 1 byte(s) per row,
      // shifting the vectors with vextq_s8 between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,396
51.569588
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2s4-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2s4__neon_mlal( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } kc = round_up_po2(kc, 8 * sizeof(int8_t)); do { int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4; int32x4_t vacc1x0123 = vacc0x0123; int32x4_t vacc1x4567 = vacc0x4567; int32x4_t vacc2x0123 = vacc0x0123; int32x4_t vacc2x4567 = vacc0x4567; int32x4_t vacc3x0123 = vacc0x0123; int32x4_t vacc3x4567 = vacc0x4567; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + 
a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; while (k >= 16 * sizeof(int8_t)) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va0x1 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va1x1 = vld1_s8(a1); a1 += 8; int8x8_t va2x0 = vld1_s8(a2); a2 += 8; int8x8_t va2x1 = vld1_s8(a2); a2 += 8; int8x8_t va3x0 = vld1_s8(a3); a3 += 8; int8x8_t va3x1 = vld1_s8(a3); a3 += 8; const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0); int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0); int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0); int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0); const int8x8_t vb0123c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x0123c0 = vmlal_s8(vprod0x0123c0, vb0123c0x1, va0x1); vprod1x0123c0 = vmlal_s8(vprod1x0123c0, vb0123c0x1, va1x1); vprod2x0123c0 = vmlal_s8(vprod2x0123c0, vb0123c0x1, va2x1); vprod3x0123c0 = vmlal_s8(vprod3x0123c0, vb0123c0x1, va3x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0); int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0); int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0); int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0); 
const int8x8_t vb4567c0x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x4567c0 = vmlal_s8(vprod0x4567c0, vb4567c0x1, va0x1); vprod1x4567c0 = vmlal_s8(vprod1x4567c0, vb4567c0x1, va1x1); vprod2x4567c0 = vmlal_s8(vprod2x4567c0, vb4567c0x1, va2x1); vprod3x4567c0 = vmlal_s8(vprod3x4567c0, vb4567c0x1, va3x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); va0x0 = vext_s8(va0x0, va0x0, 2); va0x1 = vext_s8(va0x1, va0x1, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va1x1 = vext_s8(va1x1, va1x1, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va2x1 = vext_s8(va2x1, va2x1, 2); va3x0 = vext_s8(va3x0, va3x0, 2); va3x1 = vext_s8(va3x1, va3x1, 2); int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0); int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0); int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0); int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0); const int8x8_t vb0123c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x0123c1 = vmlal_s8(vprod0x0123c1, vb0123c1x1, va0x1); vprod1x0123c1 = vmlal_s8(vprod1x0123c1, vb0123c1x1, va1x1); vprod2x0123c1 = vmlal_s8(vprod2x0123c1, vb0123c1x1, va2x1); vprod3x0123c1 = vmlal_s8(vprod3x0123c1, vb0123c1x1, va3x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0); int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0); int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0); int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0); const int8x8_t vb4567c1x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x4567c1 = vmlal_s8(vprod0x4567c1, vb4567c1x1, va0x1); vprod1x4567c1 = vmlal_s8(vprod1x4567c1, vb4567c1x1, va1x1); vprod2x4567c1 = vmlal_s8(vprod2x4567c1, vb4567c1x1, va2x1); 
vprod3x4567c1 = vmlal_s8(vprod3x4567c1, vb4567c1x1, va3x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); va0x0 = vext_s8(va0x0, va0x0, 2); va0x1 = vext_s8(va0x1, va0x1, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va1x1 = vext_s8(va1x1, va1x1, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va2x1 = vext_s8(va2x1, va2x1, 2); va3x0 = vext_s8(va3x0, va3x0, 2); va3x1 = vext_s8(va3x1, va3x1, 2); int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0); int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0); int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0); int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0); const int8x8_t vb0123c2x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x0123c2 = vmlal_s8(vprod0x0123c2, vb0123c2x1, va0x1); vprod1x0123c2 = vmlal_s8(vprod1x0123c2, vb0123c2x1, va1x1); vprod2x0123c2 = vmlal_s8(vprod2x0123c2, vb0123c2x1, va2x1); vprod3x0123c2 = vmlal_s8(vprod3x0123c2, vb0123c2x1, va3x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0); int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0); int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0); int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0); const int8x8_t vb4567c2x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x4567c2 = vmlal_s8(vprod0x4567c2, vb4567c2x1, va0x1); vprod1x4567c2 = vmlal_s8(vprod1x4567c2, vb4567c2x1, va1x1); vprod2x4567c2 = vmlal_s8(vprod2x4567c2, vb4567c2x1, va2x1); vprod3x4567c2 = vmlal_s8(vprod3x4567c2, vb4567c2x1, va3x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); vacc3x4567 = 
vpadalq_s16(vacc3x4567, vprod3x4567c2); va0x0 = vext_s8(va0x0, va0x0, 2); va0x1 = vext_s8(va0x1, va0x1, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va1x1 = vext_s8(va1x1, va1x1, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va2x1 = vext_s8(va2x1, va2x1, 2); va3x0 = vext_s8(va3x0, va3x0, 2); va3x1 = vext_s8(va3x1, va3x1, 2); int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0); int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0); int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0); int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0); const int8x8_t vb0123c3x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x0123c3 = vmlal_s8(vprod0x0123c3, vb0123c3x1, va0x1); vprod1x0123c3 = vmlal_s8(vprod1x0123c3, vb0123c3x1, va1x1); vprod2x0123c3 = vmlal_s8(vprod2x0123c3, vb0123c3x1, va2x1); vprod3x0123c3 = vmlal_s8(vprod3x0123c3, vb0123c3x1, va3x1); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3); int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0); int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0); int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0); int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0); const int8x8_t vb4567c3x1 = vld1_s8(w); w = (const int8_t*) w + 8; vprod0x4567c3 = vmlal_s8(vprod0x4567c3, vb4567c3x1, va0x1); vprod1x4567c3 = vmlal_s8(vprod1x4567c3, vb4567c3x1, va1x1); vprod2x4567c3 = vmlal_s8(vprod2x4567c3, vb4567c3x1, va2x1); vprod3x4567c3 = vmlal_s8(vprod3x4567c3, vb4567c3x1, va3x1); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3); k -= 16 * sizeof(int8_t); } if (k != 0) { int8x8_t va0x0 = vld1_s8(a0); a0 += 8; int8x8_t va1x0 = vld1_s8(a1); a1 += 8; int8x8_t va2x0 = vld1_s8(a2); a2 += 8; int8x8_t va3x0 = vld1_s8(a3); a3 += 8; const 
int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8; int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0); int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0); int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0); int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0); int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0); int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0); int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0); int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0); int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0); int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0); int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1); int16x8_t vprod0x4567c1 = 
vmull_s8(vb4567c1x0, va0x0); int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0); int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0); int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0); int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0); int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0); int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2); int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0); int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0); int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0); int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2); va0x0 = vext_s8(va0x0, va0x0, 2); va1x0 = vext_s8(va1x0, va1x0, 2); va2x0 = vext_s8(va2x0, va2x0, 2); va3x0 = vext_s8(va3x0, va3x0, 2); int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0); int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0); int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0); int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0); vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3); vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3); vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3); vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3); int16x8_t vprod0x4567c3 = 
vmull_s8(vb4567c3x0, va0x0); int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0); int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0); int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0); vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3); vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3); vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3); vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3); } p -= 4 * sizeof(void*); } while (p != 0); const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift); vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier); vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift); vacc3x4567 = vrshlq_s32(vacc3x4567, 
vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t 
voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); 
vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
22,677
49.847534
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c2s4-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c2-neon-mull-shuffle.c.in
// Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 (signed 8-bit) indirect GEMM (IGEMM) micro-kernel: MR=4 rows by NR=8
// output channels, built on ARM NEON vmull_s8 multiplies with a "c2s4"
// shuffle scheme (weights grouped 2 channels at a time; the activation
// vectors are rotated by 2 bytes between the four weight groups via
// vext_s8), and "rndnu" (pre-shift / saturating-doubling-multiply /
// rounding post-shift) requantization.
//
// Arguments (following the XNNPACK igemm convention visible below):
//   mr         - number of valid output rows, 1..4; pointers for rows past
//                mr are aliased to the previous row so stores are harmless.
//   nc         - number of output channels (columns) still to produce.
//   kc         - number of input channels; rounded up here to a multiple of
//                8 bytes because each inner-loop iteration consumes 8 bytes
//                per row.
//   ks         - size of the indirection window, a multiple of
//                4 * sizeof(void*); `a` holds ks/(4*sizeof(void*)) groups of
//                4 row pointers, and `a` is rewound by ks after each
//                8-column tile.
//   a          - indirection buffer of input-row pointers; entries equal to
//                `zero` are used as-is (zero page), others are offset by
//                a_offset.
//   w          - packed weights: 8 int32 biases followed by interleaved
//                int8 weight groups of 8 bytes each.
//   c          - output pointer; rows are cm_stride apart, successive
//                8-column tiles cn_stride apart.
//   params     - requantization constants (params->rndnu_neon.*).
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c2s4__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Output row pointers; rows beyond mr alias the previous row so the
  // unconditional stores below never write out of bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  // Each inner-loop iteration reads 8 bytes of every activation row.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize the 4x8 int32 accumulator tile from the packed biases.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next group of 4 row pointers from the indirection buffer;
      // the `zero` sentinel row is not offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // 8 activation bytes per row; rotated by 2 between weight groups.
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

        // 4 weight groups (c0..c3), each 8 bytes for channels 0-3 and 4-7.
        const int8x8_t vb0123c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c2x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb0123c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb4567c3x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Group c0: widening 8x8->16 multiplies, then pairwise-add-and-
        // accumulate into the int32 accumulators.
        int16x8_t vprod0x0123c0 = vmull_s8(vb0123c0x0, va0x0);
        int16x8_t vprod1x0123c0 = vmull_s8(vb0123c0x0, va1x0);
        int16x8_t vprod2x0123c0 = vmull_s8(vb0123c0x0, va2x0);
        int16x8_t vprod3x0123c0 = vmull_s8(vb0123c0x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c0);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c0);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c0);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c0);
        int16x8_t vprod0x4567c0 = vmull_s8(vb4567c0x0, va0x0);
        int16x8_t vprod1x4567c0 = vmull_s8(vb4567c0x0, va1x0);
        int16x8_t vprod2x4567c0 = vmull_s8(vb4567c0x0, va2x0);
        int16x8_t vprod3x4567c0 = vmull_s8(vb4567c0x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c0);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c0);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c0);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c0);
        // Rotate activations by 2 bytes to line up with weight group c1.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        // Group c1.
        int16x8_t vprod0x0123c1 = vmull_s8(vb0123c1x0, va0x0);
        int16x8_t vprod1x0123c1 = vmull_s8(vb0123c1x0, va1x0);
        int16x8_t vprod2x0123c1 = vmull_s8(vb0123c1x0, va2x0);
        int16x8_t vprod3x0123c1 = vmull_s8(vb0123c1x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c1);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c1);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c1);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c1);
        int16x8_t vprod0x4567c1 = vmull_s8(vb4567c1x0, va0x0);
        int16x8_t vprod1x4567c1 = vmull_s8(vb4567c1x0, va1x0);
        int16x8_t vprod2x4567c1 = vmull_s8(vb4567c1x0, va2x0);
        int16x8_t vprod3x4567c1 = vmull_s8(vb4567c1x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c1);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c1);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c1);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c1);
        // Rotate again for weight group c2.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        // Group c2.
        int16x8_t vprod0x0123c2 = vmull_s8(vb0123c2x0, va0x0);
        int16x8_t vprod1x0123c2 = vmull_s8(vb0123c2x0, va1x0);
        int16x8_t vprod2x0123c2 = vmull_s8(vb0123c2x0, va2x0);
        int16x8_t vprod3x0123c2 = vmull_s8(vb0123c2x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c2);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c2);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c2);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c2);
        int16x8_t vprod0x4567c2 = vmull_s8(vb4567c2x0, va0x0);
        int16x8_t vprod1x4567c2 = vmull_s8(vb4567c2x0, va1x0);
        int16x8_t vprod2x4567c2 = vmull_s8(vb4567c2x0, va2x0);
        int16x8_t vprod3x4567c2 = vmull_s8(vb4567c2x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c2);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c2);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c2);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c2);
        // Rotate again for weight group c3.
        va0x0 = vext_s8(va0x0, va0x0, 2);
        va1x0 = vext_s8(va1x0, va1x0, 2);
        va2x0 = vext_s8(va2x0, va2x0, 2);
        va3x0 = vext_s8(va3x0, va3x0, 2);
        // Group c3.
        int16x8_t vprod0x0123c3 = vmull_s8(vb0123c3x0, va0x0);
        int16x8_t vprod1x0123c3 = vmull_s8(vb0123c3x0, va1x0);
        int16x8_t vprod2x0123c3 = vmull_s8(vb0123c3x0, va2x0);
        int16x8_t vprod3x0123c3 = vmull_s8(vb0123c3x0, va3x0);
        vacc0x0123 = vpadalq_s16(vacc0x0123, vprod0x0123c3);
        vacc1x0123 = vpadalq_s16(vacc1x0123, vprod1x0123c3);
        vacc2x0123 = vpadalq_s16(vacc2x0123, vprod2x0123c3);
        vacc3x0123 = vpadalq_s16(vacc3x0123, vprod3x0123c3);
        int16x8_t vprod0x4567c3 = vmull_s8(vb4567c3x0, va0x0);
        int16x8_t vprod1x4567c3 = vmull_s8(vb4567c3x0, va1x0);
        int16x8_t vprod2x4567c3 = vmull_s8(vb4567c3x0, va2x0);
        int16x8_t vprod3x4567c3 = vmull_s8(vb4567c3x0, va3x0);
        vacc0x4567 = vpadalq_s16(vacc0x4567, vprod0x4567c3);
        vacc1x4567 = vpadalq_s16(vacc1x4567, vprod1x4567c3);
        vacc2x4567 = vpadalq_s16(vacc2x4567, vprod2x4567c3);
        vacc3x4567 = vpadalq_s16(vacc3x4567, vprod3x4567c3);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 4 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, saturating doubling
    // multiply-high, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add output zero point, then
    // narrow int16 -> int8 (saturating). AArch64 uses the *_high forms.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse (c3..c0), advance to the
      // next tile, and rewind the indirection buffer.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder tile (nc < 8): store 4, 2, then 1 byte per row, shifting
      // the vectors down between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
13,903
45.814815
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4-minmax-rndnu-neon-mull-dup.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel, MR=4 rows x NR=8 columns, processing
// 4 bytes of the K dimension per multiply (c4). Inputs are gathered through an
// indirection buffer `a` (ks pointers per output pixel); rows pointing at
// `zero` are the zero-padding row and are NOT offset by `a_offset`.
// Requantization uses the "rndnu" scheme (pre-shift, Q31 multiply, rounding
// post-shift). XNN_OOB_READS: the kernel may read past the end of inputs.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neon_mull_dup(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is processed 4 bytes at a time, so round it up to a multiple of 4.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Output row pointers; for mr < 4 the extra rows alias lower rows so the
  // same code path can run (stores to the aliased rows are redundant).
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Load the 8 per-column int32 biases, 2 at a time, into row-0
    // accumulators (each int32x4 holds 2 columns as 64-bit pair sums),
    // then broadcast to rows 1-3.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    // Walk the indirection buffer: 4 input-row pointers per iteration.
    size_t p = ks;
    do {
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // Main K loop: 8 input bytes per row per iteration (two 4-byte
      // groups, "c0" and "c1"). Products are widened with VMULL and
      // pairwise-accumulated into 32-bit lanes with VPADAL.
      size_t k = kc;
      while (k >= 8 * sizeof(int8_t)) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Group c0: duplicate the low 32-bit lane (first 4 K bytes) of each row.
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));

        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);

        // Group c1: duplicate the high 32-bit lane (second 4 K bytes).
        const int8x8_t va0c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 1));
        const int8x8_t va1c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 1));
        const int8x8_t va2c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 1));
        const int8x8_t va3c1 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 1));

        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

        k -= 8 * sizeof(int8_t);
      }

      // K remainder (4 bytes, after the round_up_po2 above): process only
      // group c0. Input pointers advance by k (OOB reads are permitted).
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Reduce the 64-bit-pair accumulators to one int32 per output column
    // (pairwise horizontal add): AArch64 has VPADDQ; AArch32 does it in halves.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating pre-shift, doubling Q31 multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow int32 -> int16 (saturating), add the output zero point,
    // then narrow int16 -> int8 (saturating), packing two rows per q-register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column store; rewind the indirection buffer and continue
      // with the next NR columns.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial store of the last 1-7 columns: write 4-, 2-, then 1-byte
      // pieces, shifting the vectors down with VEXT between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,273
51.118252
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4-minmax-rndnu-neon-mull-ld1r.c
// Auto-generated file. Do not edit! // Template: src/qs8-igemm/c4-neon-mull-dup.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <arm_neon.h> #include <xnnpack/gemm.h> #include <xnnpack/math.h> void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld1r( size_t mr, size_t nc, size_t kc, size_t ks, const int8_t** restrict a, const void* restrict w, int8_t* restrict c, size_t cm_stride, size_t cn_stride, size_t a_offset, const int8_t* zero, const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS { assert(mr != 0); assert(mr <= 4); assert(nc != 0); assert(kc != 0); assert(ks != 0); assert(ks % (4 * sizeof(void*)) == 0); assert(a_offset % sizeof(int8_t) == 0); assert(a != NULL); assert(w != NULL); assert(c != NULL); kc = round_up_po2(kc, 4 * sizeof(int8_t)); int8_t* c0 = c; int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride); if XNN_UNPREDICTABLE(mr < 2) { c1 = c0; } int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride); if XNN_UNPREDICTABLE(mr <= 2) { c2 = c1; } int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride); if XNN_UNPREDICTABLE(mr != 4) { c3 = c2; } do { int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t)); int32x4_t vacc1x01 = vacc0x01; int32x4_t vacc1x23 = vacc0x23; int32x4_t vacc1x45 = vacc0x45; int32x4_t vacc1x67 = vacc0x67; int32x4_t vacc2x01 = vacc0x01; int32x4_t vacc2x23 = vacc0x23; int32x4_t vacc2x45 = 
vacc0x45; int32x4_t vacc2x67 = vacc0x67; int32x4_t vacc3x01 = vacc0x01; int32x4_t vacc3x23 = vacc0x23; int32x4_t vacc3x45 = vacc0x45; int32x4_t vacc3x67 = vacc0x67; size_t p = ks; do { const int8_t* restrict a0 = a[0]; if XNN_UNPREDICTABLE(a0 != zero) { a0 = (const int8_t*) ((uintptr_t) a0 + a_offset); } const int8_t* restrict a1 = a[1]; if XNN_UNPREDICTABLE(a1 != zero) { a1 = (const int8_t*) ((uintptr_t) a1 + a_offset); } const int8_t* restrict a2 = a[2]; if XNN_UNPREDICTABLE(a2 != zero) { a2 = (const int8_t*) ((uintptr_t) a2 + a_offset); } const int8_t* restrict a3 = a[3]; if XNN_UNPREDICTABLE(a3 != zero) { a3 = (const int8_t*) ((uintptr_t) a3 + a_offset); } a += 4; size_t k = kc; while (k >= 8 * sizeof(int8_t)) { const int32x2_t va00 = vld1_dup_s32((const void*)a0); const int32x2_t va01 = vld1_dup_s32((const void*)(a0 + 4)); a0 += 8; const int32x2_t va10 = vld1_dup_s32((const void*)a1); const int32x2_t va11 = vld1_dup_s32((const void*)(a1 + 4)); a1 += 8; const int32x2_t va20 = vld1_dup_s32((const void*)a2); const int32x2_t va21 = vld1_dup_s32((const void*)(a2 + 4)); a2 += 8; const int32x2_t va30 = vld1_dup_s32((const void*)a3); const int32x2_t va31 = vld1_dup_s32((const void*)(a3 + 4)); a3 += 8; const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = 
vreinterpret_s8_s32(va00); const int8x8_t va1c0 = vreinterpret_s8_s32(va10); const int8x8_t va2c0 = vreinterpret_s8_s32(va20); const int8x8_t va3c0 = vreinterpret_s8_s32(va30); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0); const int8x8_t va0c1 = vreinterpret_s8_s32(va01); const int8x8_t va1c1 = vreinterpret_s8_s32(va11); const int8x8_t va2c1 = vreinterpret_s8_s32(va21); const int8x8_t va3c1 = 
vreinterpret_s8_s32(va31); const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1); const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1); const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1); const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1); vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1); const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1); const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1); const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1); const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1); vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1); const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1); const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1); const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1); const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1); vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1); const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1); const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1); const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1); const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1); vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1); k -= 8 * sizeof(int8_t); } if XNN_UNLIKELY(k != 0) { const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k); const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k); const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k); const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) 
((uintptr_t) a3 + k); const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t)); const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0)); const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0); vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0); const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0); vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0); const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0); vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0); const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0); vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0); const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0)); const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0); vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0); const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0); vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0); const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0); vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0); const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0); vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0); const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0)); const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0); vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0); const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0); vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0); const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0); vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0); const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0); vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0); const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0)); const int16x8_t vprod3x01c0 = 
vmull_s8(vb01c0, va3c0); vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0); const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0); vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0); const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0); vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0); const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0); vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0); } p -= 4 * sizeof(void*); } while (p != 0); #if XNN_ARCH_ARM64 int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23); int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67); int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23); int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67); int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23); int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67); int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23); int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67); #else const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01)); const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23)); int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23); const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45)); const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67)); int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67); const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01)); const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23)); int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23); const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45)); const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67)); int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67); const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01)); const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23)); int32x4_t 
vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23); const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45)); const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67)); int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67); const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01)); const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23)); int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23); const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45)); const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67)); int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67); #endif const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift); const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier); const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift); vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift); vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift); vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift); vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift); vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift); vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift); vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift); vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift); vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier); vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier); vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier); vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier); vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier); vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier); vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier); vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier); vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift); vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift); vacc1x0123 = 
vrshlq_s32(vacc1x0123, vright_post_shift); vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift); vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift); vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift); vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift); vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift); const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point); #if XNN_ARCH_ARM64 int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567); int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567); int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567); int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567); int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567); #else int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)); int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)); int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)); int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)); vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point); vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point); vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point); vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point); int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567)); int8x16_t vout2x01234567_3x01234567 = 
vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567)); #endif const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min); vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min); vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min); const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max); vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max); vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max); if (nc >= 8) { vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567)); vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567)); vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567)); vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567)); c3 = (int8_t*) ((uintptr_t) c3 + cn_stride); c2 = (int8_t*) ((uintptr_t) c2 + cn_stride); c1 = (int8_t*) ((uintptr_t) c1 + cn_stride); c0 = (int8_t*) ((uintptr_t) c0 + cn_stride); a = (const int8_t**restrict) ((uintptr_t) a - ks); nc -= 8; } else { if (nc & 4) { vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4; vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4; vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4; vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4; vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4); } if (nc & 2) { vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2; vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2; vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2; vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2; vout2x01234567_3x01234567 = 
vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2); vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2); } if (nc & 1) { vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8); vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0); vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8); vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0); } nc = 0; } } while (nc != 0); }
20,322
50.712468
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4-minmax-rndnu-neon-mull-ld2r.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-dup.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) micro-kernel: MR=4 rows x NR=8 columns, consuming
// 4 bytes of K per multiply group ("c4"), using NEON vmull_s8 with vld2_dup
// ("ld2r") broadcast loads of the activations.  Requantization scheme: rndnu
// (saturating pre-shift, doubling-high multiply, rounding post-shift).
// NOTE(review): the layout of `w` (two-int32 bias pairs per column pair,
// followed by packed int8 weights) is assumed to match the corresponding
// XNNPACK packing routine — confirm against the packer.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neon_mull_ld2r(
    size_t mr,                  // rows of C actually computed, 1..4
    size_t nc,                  // output columns remaining
    size_t kc,                  // reduction (K) length in bytes
    size_t ks,                  // indirection buffer span: multiple of 4 pointers
    const int8_t** restrict a,  // indirection buffer of activation row pointers
    const void* restrict w,     // packed weights: per-column bias then int8 weights
    int8_t* restrict c,         // output matrix
    size_t cm_stride,           // byte stride between output rows
    size_t cn_stride,           // byte stride between 8-column output tiles
    size_t a_offset,            // byte offset applied to non-`zero` activation pointers
    const int8_t* zero,         // shared zero buffer used for padding rows
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // K is processed in 4-byte groups; packed weights are padded to match.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Output row pointers.  Rows beyond `mr` alias the previous row so the
  // unconditional stores below stay in bounds.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize accumulators from the packed bias: each vaccRxMN covers two
    // output columns, loaded as 2 x int32 and zero-extended into 64-bit lanes
    // (the halves are pairwise-reduced after the K loop).
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    size_t p = ks;
    do {
      // Fetch the next 4 activation row pointers from the indirection buffer.
      // Pointers equal to `zero` reference the shared zero buffer and must
      // NOT be offset by a_offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 8 bytes of K per iteration (two 4-byte groups, c0 and c1).
      while (k >= 8 * sizeof(int8_t)) {
        // vld2_dup_s32 de-interleaves two consecutive 32-bit groups and
        // broadcasts each: val[0] replicates activation bytes 0..3,
        // val[1] replicates bytes 4..7.
        const int32x2x2_t va0 = vld2_dup_s32((const void*)a0); a0 += 8;
        const int32x2x2_t va1 = vld2_dup_s32((const void*)a1); a1 += 8;
        const int32x2x2_t va2 = vld2_dup_s32((const void*)a2); a2 += 8;
        const int32x2x2_t va3 = vld2_dup_s32((const void*)a3); a3 += 8;

        // Load 8 weight vectors: column pairs {01,23,45,67} for K-groups c0, c1.
        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb01c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // K-group c0: widening int8 multiply, then pairwise-accumulate the
        // int16 products into the int32 accumulators.
        const int8x8_t va0c0 = vreinterpret_s8_s32(va0.val[0]);
        const int8x8_t va1c0 = vreinterpret_s8_s32(va1.val[0]);
        const int8x8_t va2c0 = vreinterpret_s8_s32(va2.val[0]);
        const int8x8_t va3c0 = vreinterpret_s8_s32(va3.val[0]);

        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);

        // K-group c1: same pattern with the second 4-byte activation group.
        const int8x8_t va0c1 = vreinterpret_s8_s32(va0.val[1]);
        const int8x8_t va1c1 = vreinterpret_s8_s32(va1.val[1]);
        const int8x8_t va2c1 = vreinterpret_s8_s32(va2.val[1]);
        const int8x8_t va3c1 = vreinterpret_s8_s32(va3.val[1]);

        const int16x8_t vprod0x01c1 = vmull_s8(vb01c1, va0c1);
        const int16x8_t vprod1x01c1 = vmull_s8(vb01c1, va1c1);
        const int16x8_t vprod2x01c1 = vmull_s8(vb01c1, va2c1);
        const int16x8_t vprod3x01c1 = vmull_s8(vb01c1, va3c1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
        const int16x8_t vprod0x23c1 = vmull_s8(vb23c1, va0c1);
        const int16x8_t vprod1x23c1 = vmull_s8(vb23c1, va1c1);
        const int16x8_t vprod2x23c1 = vmull_s8(vb23c1, va2c1);
        const int16x8_t vprod3x23c1 = vmull_s8(vb23c1, va3c1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
        const int16x8_t vprod0x45c1 = vmull_s8(vb45c1, va0c1);
        const int16x8_t vprod1x45c1 = vmull_s8(vb45c1, va1c1);
        const int16x8_t vprod2x45c1 = vmull_s8(vb45c1, va2c1);
        const int16x8_t vprod3x45c1 = vmull_s8(vb45c1, va3c1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
        const int16x8_t vprod0x67c1 = vmull_s8(vb67c1, va0c1);
        const int16x8_t vprod1x67c1 = vmull_s8(vb67c1, va1c1);
        const int16x8_t vprod2x67c1 = vmull_s8(vb67c1, va2c1);
        const int16x8_t vprod3x67c1 = vmull_s8(vb67c1, va3c1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

        k -= 8 * sizeof(int8_t);
      }
      // Remainder: 1..7 bytes of K left; only group c0 is processed (kc was
      // rounded up to 4 bytes).  The 8-byte vld1_s8 loads may over-read; the
      // kernel is annotated XNN_OOB_READS.
      if XNN_UNLIKELY(k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 = (const int8_t*) ((uintptr_t) a0 + k);
        const int8x8_t va1 = vld1_s8(a1); a1 = (const int8_t*) ((uintptr_t) a1 + k);
        const int8x8_t va2 = vld1_s8(a2); a2 = (const int8_t*) ((uintptr_t) a2 + k);
        const int8x8_t va3 = vld1_s8(a3); a3 = (const int8_t*) ((uintptr_t) a3 + k);

        const int8x8_t vb01c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb23c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb45c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int8x8_t vb67c0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));

        // Broadcast the first 4 activation bytes of each row and accumulate.
        const int8x8_t va0c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va0), 0));
        const int16x8_t vprod0x01c0 = vmull_s8(vb01c0, va0c0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        const int16x8_t vprod0x23c0 = vmull_s8(vb23c0, va0c0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        const int16x8_t vprod0x45c0 = vmull_s8(vb45c0, va0c0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        const int16x8_t vprod0x67c0 = vmull_s8(vb67c0, va0c0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        const int8x8_t va1c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va1), 0));
        const int16x8_t vprod1x01c0 = vmull_s8(vb01c0, va1c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        const int16x8_t vprod1x23c0 = vmull_s8(vb23c0, va1c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        const int16x8_t vprod1x45c0 = vmull_s8(vb45c0, va1c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        const int16x8_t vprod1x67c0 = vmull_s8(vb67c0, va1c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        const int8x8_t va2c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va2), 0));
        const int16x8_t vprod2x01c0 = vmull_s8(vb01c0, va2c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        const int16x8_t vprod2x23c0 = vmull_s8(vb23c0, va2c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        const int16x8_t vprod2x45c0 = vmull_s8(vb45c0, va2c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        const int16x8_t vprod2x67c0 = vmull_s8(vb67c0, va2c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        const int8x8_t va3c0 = vreinterpret_s8_s32(vdup_lane_s32(vreinterpret_s32_s8(va3), 0));
        const int16x8_t vprod3x01c0 = vmull_s8(vb01c0, va3c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        const int16x8_t vprod3x23c0 = vmull_s8(vb23c0, va3c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        const int16x8_t vprod3x45c0 = vmull_s8(vb45c0, va3c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        const int16x8_t vprod3x67c0 = vmull_s8(vb67c0, va3c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Pairwise-reduce the two-column accumulators into 4-column vectors.
    // AArch64 has vpaddq_s32; on AArch32 the same reduction is built from
    // 64-bit vpadd/vcombine.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating pre-shift, saturating doubling-high
    // multiply, rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow to int16 with saturation, add the output zero point, then
    // narrow to int8 with saturation (two rows packed per int8x16).
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: rows are stored highest-first (c3..c0).
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      // Rewind the indirection buffer for the next 8-column tile.
      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial final tile: store 4/2/1 columns, shifting the packed output
      // vectors down after each lane store.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,102
50.678663
128
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel, 4 rows x 8 columns, using the
// ARMv8.2 dot-product (SDOT) extension with 4-element channel blocking.
//
// Arguments follow the XNNPACK IGEMM convention:
//   mr/nc/kc  - tile rows, output columns, reduction (channel) size
//   ks        - size of the indirection buffer slice per output pixel
//   a         - indirection buffer: ks/(4*sizeof(void*)) groups of 4 row pointers
//   w         - packed weights: per-column bias followed by packed int8 weights
//   c         - output, cm_stride between rows, cn_stride between column tiles
//   a_offset  - byte offset added to every non-`zero` activation pointer
//   zero      - pointer to a zero vector used for padding pixels
//   params    - rndnu requantization parameters (pre-shift, multiplier,
//               post-shift, output zero point, and output min/max)
//
// FIX: the requantization pre-shift previously used the non-saturating
// vshlq_s32; the rndnu scheme (see the sibling c4s2 kernels and the upstream
// template) applies a *saturating* shift here, so vqshlq_s32 is used.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The packed weights assume kc padded to a multiple of 4 bytes (c4 layout).
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Row output pointers; rows beyond `mr` alias the previous row so stores
  // are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // Initialize accumulators with the per-column bias from the packed weights.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next group of 4 activation row pointers from the
      // indirection buffer; `zero` rows (padding) are not offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      // Inner accumulation loop along the 8 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 4x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;

        // Load a 8x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 4x8 * 8x8 --> 4x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 4x4 block of activations.
        // (8-byte loads; over-read past `k` is allowed by XNN_OOB_READS.)
        const int8x8_t va0x01234567 = vld1_s8(a0);
        const int8x8_t va1x01234567 = vld1_s8(a1);
        const int8x8_t va2x01234567 = vld1_s8(a2);
        const int8x8_t va3x01234567 = vld1_s8(a3);

        // Load a 4x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 4x4 * 4x8 --> 4x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // rndnu requantization: saturating pre-shift, saturating doubling
    // multiply-high, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    // Saturating shift (was non-saturating vshlq_s32 - see header note).
    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow 32 -> 16 bits with saturation, add the output zero point,
    // then narrow 16 -> 8 bits with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse (c3..c0), rewind the
      // indirection buffer, and advance to the next column tile.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4/2/1 columns, rotating the vectors as we go.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
11,672
45.879518
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4s2-minmax-rndnu-neon-mlal.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) microkernel, 4 rows x 8 columns, built on
// vmull_s8/vmlal_s8 with a c4s2 "shuffle" layout: each 8-byte activation
// vector is consumed twice, rotated by 4 bytes (vext_s8) between the c0 and
// c1 weight groups. The main loop is 2x unrolled over k (16 bytes at a time,
// mull + mlal pairs); the remainder handles one 8-byte step with mull only.
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mlal(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers; rows beyond `mr` alias the previous row so stores
  // are harmless duplicates.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  // Packed weights assume kc padded to a multiple of 8 bytes.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Initialize column-pair accumulators from the packed per-column bias.
    // Each int32x4 holds 2 columns x 2 partial sums, later pairwise-added.
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    size_t p = ks;
    do {
      // Fetch the next group of 4 activation row pointers from the
      // indirection buffer; `zero` rows (padding) are not offset.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      // Main loop: 16 bytes of k per iteration (mull on x0, mlal on x1).
      while (k >= 16 * sizeof(int8_t)) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va0x1 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va1x1 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va2x1 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;
        int8x8_t va3x1 = vld1_s8(a3); a3 += 8;

        // Weight vectors for the c0 and c1 shuffle groups.
        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // --- c0 group: columns 0..7 against the un-rotated activations. ---
        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
        const int8x8_t vb01c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x01c0 = vmlal_s8(vprod0x01c0, vb01c0x1, va0x1);
        vprod1x01c0 = vmlal_s8(vprod1x01c0, vb01c0x1, va1x1);
        vprod2x01c0 = vmlal_s8(vprod2x01c0, vb01c0x1, va2x1);
        vprod3x01c0 = vmlal_s8(vprod3x01c0, vb01c0x1, va3x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
        const int8x8_t vb23c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x23c0 = vmlal_s8(vprod0x23c0, vb23c0x1, va0x1);
        vprod1x23c0 = vmlal_s8(vprod1x23c0, vb23c0x1, va1x1);
        vprod2x23c0 = vmlal_s8(vprod2x23c0, vb23c0x1, va2x1);
        vprod3x23c0 = vmlal_s8(vprod3x23c0, vb23c0x1, va3x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
        const int8x8_t vb45c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x45c0 = vmlal_s8(vprod0x45c0, vb45c0x1, va0x1);
        vprod1x45c0 = vmlal_s8(vprod1x45c0, vb45c0x1, va1x1);
        vprod2x45c0 = vmlal_s8(vprod2x45c0, vb45c0x1, va2x1);
        vprod3x45c0 = vmlal_s8(vprod3x45c0, vb45c0x1, va3x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
        const int8x8_t vb67c0x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x67c0 = vmlal_s8(vprod0x67c0, vb67c0x1, va0x1);
        vprod1x67c0 = vmlal_s8(vprod1x67c0, vb67c0x1, va1x1);
        vprod2x67c0 = vmlal_s8(vprod2x67c0, vb67c0x1, va2x1);
        vprod3x67c0 = vmlal_s8(vprod3x67c0, vb67c0x1, va3x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
        // Rotate activations by 4 bytes for the c1 shuffle group.
        va0x0 = vext_s8(va0x0, va0x0, 4);
        va0x1 = vext_s8(va0x1, va0x1, 4);
        va1x0 = vext_s8(va1x0, va1x0, 4);
        va1x1 = vext_s8(va1x1, va1x1, 4);
        va2x0 = vext_s8(va2x0, va2x0, 4);
        va2x1 = vext_s8(va2x1, va2x1, 4);
        va3x0 = vext_s8(va3x0, va3x0, 4);
        va3x1 = vext_s8(va3x1, va3x1, 4);
        // --- c1 group: columns 0..7 against the rotated activations. ---
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
        const int8x8_t vb01c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x01c1 = vmlal_s8(vprod0x01c1, vb01c1x1, va0x1);
        vprod1x01c1 = vmlal_s8(vprod1x01c1, vb01c1x1, va1x1);
        vprod2x01c1 = vmlal_s8(vprod2x01c1, vb01c1x1, va2x1);
        vprod3x01c1 = vmlal_s8(vprod3x01c1, vb01c1x1, va3x1);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
        const int8x8_t vb23c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x23c1 = vmlal_s8(vprod0x23c1, vb23c1x1, va0x1);
        vprod1x23c1 = vmlal_s8(vprod1x23c1, vb23c1x1, va1x1);
        vprod2x23c1 = vmlal_s8(vprod2x23c1, vb23c1x1, va2x1);
        vprod3x23c1 = vmlal_s8(vprod3x23c1, vb23c1x1, va3x1);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
        const int8x8_t vb45c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x45c1 = vmlal_s8(vprod0x45c1, vb45c1x1, va0x1);
        vprod1x45c1 = vmlal_s8(vprod1x45c1, vb45c1x1, va1x1);
        vprod2x45c1 = vmlal_s8(vprod2x45c1, vb45c1x1, va2x1);
        vprod3x45c1 = vmlal_s8(vprod3x45c1, vb45c1x1, va3x1);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
        const int8x8_t vb67c1x1 = vld1_s8(w); w = (const int8_t*) w + 8;
        vprod0x67c1 = vmlal_s8(vprod0x67c1, vb67c1x1, va0x1);
        vprod1x67c1 = vmlal_s8(vprod1x67c1, vb67c1x1, va1x1);
        vprod2x67c1 = vmlal_s8(vprod2x67c1, vb67c1x1, va2x1);
        vprod3x67c1 = vmlal_s8(vprod3x67c1, vb67c1x1, va3x1);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

        k -= 16 * sizeof(int8_t);
      }
      // Remainder: one 8-byte step (mull only, no mlal pair).
      if (k != 0) {
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
        // Rotate activations by 4 bytes for the c1 shuffle group.
        va0x0 = vext_s8(va0x0, va0x0, 4);
        va1x0 = vext_s8(va1x0, va1x0, 4);
        va2x0 = vext_s8(va2x0, va2x0, 4);
        va3x0 = vext_s8(va3x0, va3x0, 4);
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

      }
      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Reduce column-pair accumulators into one int32x4 per 4 columns.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating pre-shift, saturating doubling
    // multiply-high, then rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Narrow 32 -> 16 bits with saturation, add the output zero point,
    // then narrow 16 -> 8 bits with saturation.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif
    // Clamp to the requested output range.
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse (c3..c0), rewind the
      // indirection buffer, and advance to the next column tile.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4/2/1 columns, rotating the vectors as we go.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
23,686
49.830472
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c4s2-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neon-mull-shuffle.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) micro-kernel, MR=4 rows x NR=8 columns,
// "c4s2" weight layout: weights are packed 4 K-bytes per column pair, and each
// 8-byte slice of the activations is consumed in two 4-byte phases (the second
// phase rotates the activation vectors with vext_s8 by 4 — the "shuffle").
// Accumulation uses NEON vmull_s8 (8x8->16-bit multiply) followed by
// vpadalq_s16 pairwise add into 32-bit accumulators.  Requantization is the
// "rndnu" scheme: saturating pre-shift, doubling high multiply, rounding
// post-shift, zero-point add, then saturating narrowing with min/max clamping.
//
// Parameters:
//   mr/nc/kc  - tile rows, output columns, reduction size (bytes of K)
//   ks        - indirection buffer size: ks/(4*sizeof(void*)) pointer groups
//   a         - indirection pointer array (MR pointers per group)
//   w         - packed weights: 8 int32 biases followed by int8 weights
//   c         - output, cm_stride between rows, cn_stride between NR tiles
//   a_offset  - byte offset applied to every non-`zero` activation pointer
//   zero      - pointer to the zero buffer (skipped rows keep this pointer)
//   params    - rndnu_neon requantization parameters
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c4s2__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Row output pointers; rows beyond mr alias the previous row so stores are
  // harmless duplicates instead of out-of-bounds writes.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  // The inner loop consumes 8 K-bytes per iteration, so round kc up to 8.
  kc = round_up_po2(kc, 8 * sizeof(int8_t));
  do {
    // Load the 8 per-column int32 biases, widening pairs of 32-bit lanes into
    // 64-bit-spaced accumulators (two columns per 128-bit register).
    int32x4_t vacc0x01 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x23 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x45 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    int32x4_t vacc0x67 = vreinterpretq_s32_u64(vmovl_u32(vld1_u32(w))); w = (const int32_t*) w + 2;
    // Rows 1..3 start from the same biases.
    int32x4_t vacc1x01 = vacc0x01;
    int32x4_t vacc1x23 = vacc0x23;
    int32x4_t vacc1x45 = vacc0x45;
    int32x4_t vacc1x67 = vacc0x67;
    int32x4_t vacc2x01 = vacc0x01;
    int32x4_t vacc2x23 = vacc0x23;
    int32x4_t vacc2x45 = vacc0x45;
    int32x4_t vacc2x67 = vacc0x67;
    int32x4_t vacc3x01 = vacc0x01;
    int32x4_t vacc3x23 = vacc0x23;
    int32x4_t vacc3x45 = vacc0x45;
    int32x4_t vacc3x67 = vacc0x67;

    size_t p = ks;
    do {
      // Fetch the next group of 4 activation pointers from the indirection
      // buffer; the a_offset is applied only to real rows, not to `zero`.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;
      do {
        // Load 8 K-bytes of activations for each of the 4 rows.
        int8x8_t va0x0 = vld1_s8(a0); a0 += 8;
        int8x8_t va1x0 = vld1_s8(a1); a1 += 8;
        int8x8_t va2x0 = vld1_s8(a2); a2 += 8;
        int8x8_t va3x0 = vld1_s8(a3); a3 += 8;

        // Load weights for both shuffle phases (c0 and c1), 8 columns each.
        const int8x8_t vb01c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c0x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb01c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb23c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb45c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;
        const int8x8_t vb67c1x0 = vld1_s8(w); w = (const int8_t*) w + 8;

        // Phase c0: multiply-accumulate the first 4 K-bytes.
        int16x8_t vprod0x01c0 = vmull_s8(vb01c0x0, va0x0);
        int16x8_t vprod1x01c0 = vmull_s8(vb01c0x0, va1x0);
        int16x8_t vprod2x01c0 = vmull_s8(vb01c0x0, va2x0);
        int16x8_t vprod3x01c0 = vmull_s8(vb01c0x0, va3x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c0);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c0);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c0);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c0);
        int16x8_t vprod0x23c0 = vmull_s8(vb23c0x0, va0x0);
        int16x8_t vprod1x23c0 = vmull_s8(vb23c0x0, va1x0);
        int16x8_t vprod2x23c0 = vmull_s8(vb23c0x0, va2x0);
        int16x8_t vprod3x23c0 = vmull_s8(vb23c0x0, va3x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c0);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c0);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c0);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c0);
        int16x8_t vprod0x45c0 = vmull_s8(vb45c0x0, va0x0);
        int16x8_t vprod1x45c0 = vmull_s8(vb45c0x0, va1x0);
        int16x8_t vprod2x45c0 = vmull_s8(vb45c0x0, va2x0);
        int16x8_t vprod3x45c0 = vmull_s8(vb45c0x0, va3x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c0);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c0);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c0);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c0);
        int16x8_t vprod0x67c0 = vmull_s8(vb67c0x0, va0x0);
        int16x8_t vprod1x67c0 = vmull_s8(vb67c0x0, va1x0);
        int16x8_t vprod2x67c0 = vmull_s8(vb67c0x0, va2x0);
        int16x8_t vprod3x67c0 = vmull_s8(vb67c0x0, va3x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c0);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c0);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c0);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c0);
        // Shuffle: rotate each activation vector by 4 bytes so the second
        // half of the K slice lines up with the c1 weights.
        va0x0 = vext_s8(va0x0, va0x0, 4);
        va1x0 = vext_s8(va1x0, va1x0, 4);
        va2x0 = vext_s8(va2x0, va2x0, 4);
        va3x0 = vext_s8(va3x0, va3x0, 4);
        // Phase c1: multiply-accumulate the second 4 K-bytes.
        int16x8_t vprod0x01c1 = vmull_s8(vb01c1x0, va0x0);
        int16x8_t vprod1x01c1 = vmull_s8(vb01c1x0, va1x0);
        int16x8_t vprod2x01c1 = vmull_s8(vb01c1x0, va2x0);
        int16x8_t vprod3x01c1 = vmull_s8(vb01c1x0, va3x0);
        vacc0x01 = vpadalq_s16(vacc0x01, vprod0x01c1);
        vacc1x01 = vpadalq_s16(vacc1x01, vprod1x01c1);
        vacc2x01 = vpadalq_s16(vacc2x01, vprod2x01c1);
        vacc3x01 = vpadalq_s16(vacc3x01, vprod3x01c1);
        int16x8_t vprod0x23c1 = vmull_s8(vb23c1x0, va0x0);
        int16x8_t vprod1x23c1 = vmull_s8(vb23c1x0, va1x0);
        int16x8_t vprod2x23c1 = vmull_s8(vb23c1x0, va2x0);
        int16x8_t vprod3x23c1 = vmull_s8(vb23c1x0, va3x0);
        vacc0x23 = vpadalq_s16(vacc0x23, vprod0x23c1);
        vacc1x23 = vpadalq_s16(vacc1x23, vprod1x23c1);
        vacc2x23 = vpadalq_s16(vacc2x23, vprod2x23c1);
        vacc3x23 = vpadalq_s16(vacc3x23, vprod3x23c1);
        int16x8_t vprod0x45c1 = vmull_s8(vb45c1x0, va0x0);
        int16x8_t vprod1x45c1 = vmull_s8(vb45c1x0, va1x0);
        int16x8_t vprod2x45c1 = vmull_s8(vb45c1x0, va2x0);
        int16x8_t vprod3x45c1 = vmull_s8(vb45c1x0, va3x0);
        vacc0x45 = vpadalq_s16(vacc0x45, vprod0x45c1);
        vacc1x45 = vpadalq_s16(vacc1x45, vprod1x45c1);
        vacc2x45 = vpadalq_s16(vacc2x45, vprod2x45c1);
        vacc3x45 = vpadalq_s16(vacc3x45, vprod3x45c1);
        int16x8_t vprod0x67c1 = vmull_s8(vb67c1x0, va0x0);
        int16x8_t vprod1x67c1 = vmull_s8(vb67c1x0, va1x0);
        int16x8_t vprod2x67c1 = vmull_s8(vb67c1x0, va2x0);
        int16x8_t vprod3x67c1 = vmull_s8(vb67c1x0, va3x0);
        vacc0x67 = vpadalq_s16(vacc0x67, vprod0x67c1);
        vacc1x67 = vpadalq_s16(vacc1x67, vprod1x67c1);
        vacc2x67 = vpadalq_s16(vacc2x67, vprod2x67c1);
        vacc3x67 = vpadalq_s16(vacc3x67, vprod3x67c1);

        k -= 8 * sizeof(int8_t);
      } while (k != 0);

      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: fold the 2-column-interleaved accumulators into
    // one int32x4 per 4 columns per row.
#if XNN_ARCH_ARM64
    int32x4_t vacc0x0123 = vpaddq_s32(vacc0x01, vacc0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vacc0x45, vacc0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vacc1x01, vacc1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vacc1x45, vacc1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vacc2x01, vacc2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vacc2x45, vacc2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vacc3x01, vacc3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vacc3x45, vacc3x67);
#else
    // AArch32 has no vpaddq; emulate with 64-bit pairwise adds + combine.
    const int32x2_t vsum0x01 = vpadd_s32(vget_low_s32(vacc0x01), vget_high_s32(vacc0x01));
    const int32x2_t vsum0x23 = vpadd_s32(vget_low_s32(vacc0x23), vget_high_s32(vacc0x23));
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23);
    const int32x2_t vsum0x45 = vpadd_s32(vget_low_s32(vacc0x45), vget_high_s32(vacc0x45));
    const int32x2_t vsum0x67 = vpadd_s32(vget_low_s32(vacc0x67), vget_high_s32(vacc0x67));
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67);
    const int32x2_t vsum1x01 = vpadd_s32(vget_low_s32(vacc1x01), vget_high_s32(vacc1x01));
    const int32x2_t vsum1x23 = vpadd_s32(vget_low_s32(vacc1x23), vget_high_s32(vacc1x23));
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23);
    const int32x2_t vsum1x45 = vpadd_s32(vget_low_s32(vacc1x45), vget_high_s32(vacc1x45));
    const int32x2_t vsum1x67 = vpadd_s32(vget_low_s32(vacc1x67), vget_high_s32(vacc1x67));
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67);
    const int32x2_t vsum2x01 = vpadd_s32(vget_low_s32(vacc2x01), vget_high_s32(vacc2x01));
    const int32x2_t vsum2x23 = vpadd_s32(vget_low_s32(vacc2x23), vget_high_s32(vacc2x23));
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23);
    const int32x2_t vsum2x45 = vpadd_s32(vget_low_s32(vacc2x45), vget_high_s32(vacc2x45));
    const int32x2_t vsum2x67 = vpadd_s32(vget_low_s32(vacc2x67), vget_high_s32(vacc2x67));
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67);
    const int32x2_t vsum3x01 = vpadd_s32(vget_low_s32(vacc3x01), vget_high_s32(vacc3x01));
    const int32x2_t vsum3x23 = vpadd_s32(vget_low_s32(vacc3x23), vget_high_s32(vacc3x23));
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23);
    const int32x2_t vsum3x45 = vpadd_s32(vget_low_s32(vacc3x45), vget_high_s32(vacc3x45));
    const int32x2_t vsum3x67 = vpadd_s32(vget_low_s32(vacc3x67), vget_high_s32(vacc3x67));
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67);
#endif

    // rndnu requantization: saturating pre-shift, doubling high multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Add output zero point in saturating int16, then narrow to int8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse order (c3 first), then
      // rewind the indirection pointer for the next NR tile.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder tile: store 4, 2, then 1 column(s), rotating the output
      // registers with vext_s8 between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,128
47.435435
110
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-4x8c8-minmax-rndnu-neon-mull.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c8-neon-mull.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) micro-kernel, MR=4 rows x NR=8 columns,
// "c8" weight layout: each output column keeps a dedicated int32x4 accumulator
// and consumes 8 K-bytes per inner-loop iteration via vmull_s8 + vpadalq_s16.
// The 8 per-column accumulators are horizontally reduced after the K loop.
// Requantization is the "rndnu" scheme: saturating pre-shift, doubling high
// multiply, rounding post-shift, zero-point add, saturating narrow + clamp.
//
// Parameters:
//   mr/nc/kc  - tile rows, output columns, reduction size (bytes of K)
//   ks        - indirection buffer size: ks/(4*sizeof(void*)) pointer groups
//   a         - indirection pointer array (MR pointers per group)
//   w         - packed weights: 8 int32 biases followed by int8 weights
//   c         - output, cm_stride between rows, cn_stride between NR tiles
//   a_offset  - byte offset applied to every non-`zero` activation pointer
//   zero      - pointer to the zero buffer (skipped rows keep this pointer)
//   params    - rndnu_neon requantization parameters
void xnn_qs8_igemm_minmax_rndnu_ukernel_4x8c8__neon_mull(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (4 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The inner loop consumes 8 K-bytes per iteration, so round kc up to 8.
  kc = round_up_po2(kc, 8 * sizeof(int8_t))

;
  // Row output pointers; rows beyond mr alias the previous row so stores are
  // harmless duplicates instead of out-of-bounds writes.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    c3 = c2;
  }

  do {
    // One int32x4 accumulator per output column, seeded with the column bias
    // in lane 0 (the remaining lanes start at zero and are folded in later).
    int32x4_t vacc0x0 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x1 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x2 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x3 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x4 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x5 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x6 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    int32x4_t vacc0x7 = vld1q_lane_s32(w, vmovq_n_s32(0), 0); w = (const void*) ((uintptr_t) w + sizeof(int32_t));
    // Rows 1..3 start from the same biases.
    int32x4_t vacc1x0 = vacc0x0;
    int32x4_t vacc1x1 = vacc0x1;
    int32x4_t vacc1x2 = vacc0x2;
    int32x4_t vacc1x3 = vacc0x3;
    int32x4_t vacc1x4 = vacc0x4;
    int32x4_t vacc1x5 = vacc0x5;
    int32x4_t vacc1x6 = vacc0x6;
    int32x4_t vacc1x7 = vacc0x7;
    int32x4_t vacc2x0 = vacc0x0;
    int32x4_t vacc2x1 = vacc0x1;
    int32x4_t vacc2x2 = vacc0x2;
    int32x4_t vacc2x3 = vacc0x3;
    int32x4_t vacc2x4 = vacc0x4;
    int32x4_t vacc2x5 = vacc0x5;
    int32x4_t vacc2x6 = vacc0x6;
    int32x4_t vacc2x7 = vacc0x7;
    int32x4_t vacc3x0 = vacc0x0;
    int32x4_t vacc3x1 = vacc0x1;
    int32x4_t vacc3x2 = vacc0x2;
    int32x4_t vacc3x3 = vacc0x3;
    int32x4_t vacc3x4 = vacc0x4;
    int32x4_t vacc3x5 = vacc0x5;
    int32x4_t vacc3x6 = vacc0x6;
    int32x4_t vacc3x7 = vacc0x7;

    size_t p = ks;
    do {
      // Fetch the next group of 4 activation pointers from the indirection
      // buffer; the a_offset is applied only to real rows, not to `zero`.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      a += 4;

      size_t k = kc;

      // Handle 8 bytes at a time using MUL.
      while (k != 0) {
        const int8x8_t va0 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3 = vld1_s8(a3); a3 += 8;

        // Column 0..7: widening multiply, then pairwise-add into int32.
        const int8x8_t vb0 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x0 = vmull_s8(vb0, va0);
        const int16x8_t vprod1x0 = vmull_s8(vb0, va1);
        const int16x8_t vprod2x0 = vmull_s8(vb0, va2);
        const int16x8_t vprod3x0 = vmull_s8(vb0, va3);
        vacc0x0 = vpadalq_s16(vacc0x0, vprod0x0);
        vacc1x0 = vpadalq_s16(vacc1x0, vprod1x0);
        vacc2x0 = vpadalq_s16(vacc2x0, vprod2x0);
        vacc3x0 = vpadalq_s16(vacc3x0, vprod3x0);
        const int8x8_t vb1 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x1 = vmull_s8(vb1, va0);
        const int16x8_t vprod1x1 = vmull_s8(vb1, va1);
        const int16x8_t vprod2x1 = vmull_s8(vb1, va2);
        const int16x8_t vprod3x1 = vmull_s8(vb1, va3);
        vacc0x1 = vpadalq_s16(vacc0x1, vprod0x1);
        vacc1x1 = vpadalq_s16(vacc1x1, vprod1x1);
        vacc2x1 = vpadalq_s16(vacc2x1, vprod2x1);
        vacc3x1 = vpadalq_s16(vacc3x1, vprod3x1);
        const int8x8_t vb2 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x2 = vmull_s8(vb2, va0);
        const int16x8_t vprod1x2 = vmull_s8(vb2, va1);
        const int16x8_t vprod2x2 = vmull_s8(vb2, va2);
        const int16x8_t vprod3x2 = vmull_s8(vb2, va3);
        vacc0x2 = vpadalq_s16(vacc0x2, vprod0x2);
        vacc1x2 = vpadalq_s16(vacc1x2, vprod1x2);
        vacc2x2 = vpadalq_s16(vacc2x2, vprod2x2);
        vacc3x2 = vpadalq_s16(vacc3x2, vprod3x2);
        const int8x8_t vb3 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x3 = vmull_s8(vb3, va0);
        const int16x8_t vprod1x3 = vmull_s8(vb3, va1);
        const int16x8_t vprod2x3 = vmull_s8(vb3, va2);
        const int16x8_t vprod3x3 = vmull_s8(vb3, va3);
        vacc0x3 = vpadalq_s16(vacc0x3, vprod0x3);
        vacc1x3 = vpadalq_s16(vacc1x3, vprod1x3);
        vacc2x3 = vpadalq_s16(vacc2x3, vprod2x3);
        vacc3x3 = vpadalq_s16(vacc3x3, vprod3x3);
        const int8x8_t vb4 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x4 = vmull_s8(vb4, va0);
        const int16x8_t vprod1x4 = vmull_s8(vb4, va1);
        const int16x8_t vprod2x4 = vmull_s8(vb4, va2);
        const int16x8_t vprod3x4 = vmull_s8(vb4, va3);
        vacc0x4 = vpadalq_s16(vacc0x4, vprod0x4);
        vacc1x4 = vpadalq_s16(vacc1x4, vprod1x4);
        vacc2x4 = vpadalq_s16(vacc2x4, vprod2x4);
        vacc3x4 = vpadalq_s16(vacc3x4, vprod3x4);
        const int8x8_t vb5 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x5 = vmull_s8(vb5, va0);
        const int16x8_t vprod1x5 = vmull_s8(vb5, va1);
        const int16x8_t vprod2x5 = vmull_s8(vb5, va2);
        const int16x8_t vprod3x5 = vmull_s8(vb5, va3);
        vacc0x5 = vpadalq_s16(vacc0x5, vprod0x5);
        vacc1x5 = vpadalq_s16(vacc1x5, vprod1x5);
        vacc2x5 = vpadalq_s16(vacc2x5, vprod2x5);
        vacc3x5 = vpadalq_s16(vacc3x5, vprod3x5);
        const int8x8_t vb6 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x6 = vmull_s8(vb6, va0);
        const int16x8_t vprod1x6 = vmull_s8(vb6, va1);
        const int16x8_t vprod2x6 = vmull_s8(vb6, va2);
        const int16x8_t vprod3x6 = vmull_s8(vb6, va3);
        vacc0x6 = vpadalq_s16(vacc0x6, vprod0x6);
        vacc1x6 = vpadalq_s16(vacc1x6, vprod1x6);
        vacc2x6 = vpadalq_s16(vacc2x6, vprod2x6);
        vacc3x6 = vpadalq_s16(vacc3x6, vprod3x6);
        const int8x8_t vb7 = vld1_s8(w); w = (const void*) ((uintptr_t) w + 8 * sizeof(int8_t));
        const int16x8_t vprod0x7 = vmull_s8(vb7, va0);
        const int16x8_t vprod1x7 = vmull_s8(vb7, va1);
        const int16x8_t vprod2x7 = vmull_s8(vb7, va2);
        const int16x8_t vprod3x7 = vmull_s8(vb7, va3);
        vacc0x7 = vpadalq_s16(vacc0x7, vprod0x7);
        vacc1x7 = vpadalq_s16(vacc1x7, vprod1x7);
        vacc2x7 = vpadalq_s16(vacc2x7, vprod2x7);
        vacc3x7 = vpadalq_s16(vacc3x7, vprod3x7);

        k -= 8 * sizeof(int8_t);
      }

      p -= 4 * sizeof(void*);
    } while (p != 0);

    // Horizontal reduction: collapse the 8 per-column accumulators of each
    // row into two int32x4 vectors (columns 0-3 and 4-7).
#if XNN_ARCH_ARM64
    const int32x4_t vsum0x01 = vpaddq_s32(vacc0x0, vacc0x1);
    const int32x4_t vsum0x23 = vpaddq_s32(vacc0x2, vacc0x3);
    const int32x4_t vsum0x45 = vpaddq_s32(vacc0x4, vacc0x5);
    const int32x4_t vsum0x67 = vpaddq_s32(vacc0x6, vacc0x7);
    const int32x4_t vsum1x01 = vpaddq_s32(vacc1x0, vacc1x1);
    const int32x4_t vsum1x23 = vpaddq_s32(vacc1x2, vacc1x3);
    const int32x4_t vsum1x45 = vpaddq_s32(vacc1x4, vacc1x5);
    const int32x4_t vsum1x67 = vpaddq_s32(vacc1x6, vacc1x7);
    const int32x4_t vsum2x01 = vpaddq_s32(vacc2x0, vacc2x1);
    const int32x4_t vsum2x23 = vpaddq_s32(vacc2x2, vacc2x3);
    const int32x4_t vsum2x45 = vpaddq_s32(vacc2x4, vacc2x5);
    const int32x4_t vsum2x67 = vpaddq_s32(vacc2x6, vacc2x7);
    const int32x4_t vsum3x01 = vpaddq_s32(vacc3x0, vacc3x1);
    const int32x4_t vsum3x23 = vpaddq_s32(vacc3x2, vacc3x3);
    const int32x4_t vsum3x45 = vpaddq_s32(vacc3x4, vacc3x5);
    const int32x4_t vsum3x67 = vpaddq_s32(vacc3x6, vacc3x7);
    int32x4_t vacc0x0123 = vpaddq_s32(vsum0x01, vsum0x23);
    int32x4_t vacc0x4567 = vpaddq_s32(vsum0x45, vsum0x67);
    int32x4_t vacc1x0123 = vpaddq_s32(vsum1x01, vsum1x23);
    int32x4_t vacc1x4567 = vpaddq_s32(vsum1x45, vsum1x67);
    int32x4_t vacc2x0123 = vpaddq_s32(vsum2x01, vsum2x23);
    int32x4_t vacc2x4567 = vpaddq_s32(vsum2x45, vsum2x67);
    int32x4_t vacc3x0123 = vpaddq_s32(vsum3x01, vsum3x23);
    int32x4_t vacc3x4567 = vpaddq_s32(vsum3x45, vsum3x67);
#else
    // AArch32 has no vpaddq; reduce each accumulator with vadd of its halves,
    // then pairwise-add and recombine.
    const int32x2_t vpsum0x0 = vadd_s32(vget_low_s32(vacc0x0), vget_high_s32(vacc0x0));
    const int32x2_t vpsum0x1 = vadd_s32(vget_low_s32(vacc0x1), vget_high_s32(vacc0x1));
    const int32x2_t vpsum0x2 = vadd_s32(vget_low_s32(vacc0x2), vget_high_s32(vacc0x2));
    const int32x2_t vpsum0x3 = vadd_s32(vget_low_s32(vacc0x3), vget_high_s32(vacc0x3));
    const int32x2_t vsum0x01 = vpadd_s32(vpsum0x0, vpsum0x1);
    const int32x2_t vsum0x23 = vpadd_s32(vpsum0x2, vpsum0x3);
    int32x4_t vacc0x0123 = vcombine_s32(vsum0x01, vsum0x23 );
    const int32x2_t vpsum0x4 = vadd_s32(vget_low_s32(vacc0x4), vget_high_s32(vacc0x4));
    const int32x2_t vpsum0x5 = vadd_s32(vget_low_s32(vacc0x5), vget_high_s32(vacc0x5));
    const int32x2_t vpsum0x6 = vadd_s32(vget_low_s32(vacc0x6), vget_high_s32(vacc0x6));
    const int32x2_t vpsum0x7 = vadd_s32(vget_low_s32(vacc0x7), vget_high_s32(vacc0x7));
    const int32x2_t vsum0x45 = vpadd_s32(vpsum0x4, vpsum0x5);
    const int32x2_t vsum0x67 = vpadd_s32(vpsum0x6, vpsum0x7);
    int32x4_t vacc0x4567 = vcombine_s32(vsum0x45, vsum0x67 );
    const int32x2_t vpsum1x0 = vadd_s32(vget_low_s32(vacc1x0), vget_high_s32(vacc1x0));
    const int32x2_t vpsum1x1 = vadd_s32(vget_low_s32(vacc1x1), vget_high_s32(vacc1x1));
    const int32x2_t vpsum1x2 = vadd_s32(vget_low_s32(vacc1x2), vget_high_s32(vacc1x2));
    const int32x2_t vpsum1x3 = vadd_s32(vget_low_s32(vacc1x3), vget_high_s32(vacc1x3));
    const int32x2_t vsum1x01 = vpadd_s32(vpsum1x0, vpsum1x1);
    const int32x2_t vsum1x23 = vpadd_s32(vpsum1x2, vpsum1x3);
    int32x4_t vacc1x0123 = vcombine_s32(vsum1x01, vsum1x23 );
    const int32x2_t vpsum1x4 = vadd_s32(vget_low_s32(vacc1x4), vget_high_s32(vacc1x4));
    const int32x2_t vpsum1x5 = vadd_s32(vget_low_s32(vacc1x5), vget_high_s32(vacc1x5));
    const int32x2_t vpsum1x6 = vadd_s32(vget_low_s32(vacc1x6), vget_high_s32(vacc1x6));
    const int32x2_t vpsum1x7 = vadd_s32(vget_low_s32(vacc1x7), vget_high_s32(vacc1x7));
    const int32x2_t vsum1x45 = vpadd_s32(vpsum1x4, vpsum1x5);
    const int32x2_t vsum1x67 = vpadd_s32(vpsum1x6, vpsum1x7);
    int32x4_t vacc1x4567 = vcombine_s32(vsum1x45, vsum1x67 );
    const int32x2_t vpsum2x0 = vadd_s32(vget_low_s32(vacc2x0), vget_high_s32(vacc2x0));
    const int32x2_t vpsum2x1 = vadd_s32(vget_low_s32(vacc2x1), vget_high_s32(vacc2x1));
    const int32x2_t vpsum2x2 = vadd_s32(vget_low_s32(vacc2x2), vget_high_s32(vacc2x2));
    const int32x2_t vpsum2x3 = vadd_s32(vget_low_s32(vacc2x3), vget_high_s32(vacc2x3));
    const int32x2_t vsum2x01 = vpadd_s32(vpsum2x0, vpsum2x1);
    const int32x2_t vsum2x23 = vpadd_s32(vpsum2x2, vpsum2x3);
    int32x4_t vacc2x0123 = vcombine_s32(vsum2x01, vsum2x23 );
    const int32x2_t vpsum2x4 = vadd_s32(vget_low_s32(vacc2x4), vget_high_s32(vacc2x4));
    const int32x2_t vpsum2x5 = vadd_s32(vget_low_s32(vacc2x5), vget_high_s32(vacc2x5));
    const int32x2_t vpsum2x6 = vadd_s32(vget_low_s32(vacc2x6), vget_high_s32(vacc2x6));
    const int32x2_t vpsum2x7 = vadd_s32(vget_low_s32(vacc2x7), vget_high_s32(vacc2x7));
    const int32x2_t vsum2x45 = vpadd_s32(vpsum2x4, vpsum2x5);
    const int32x2_t vsum2x67 = vpadd_s32(vpsum2x6, vpsum2x7);
    int32x4_t vacc2x4567 = vcombine_s32(vsum2x45, vsum2x67 );
    const int32x2_t vpsum3x0 = vadd_s32(vget_low_s32(vacc3x0), vget_high_s32(vacc3x0));
    const int32x2_t vpsum3x1 = vadd_s32(vget_low_s32(vacc3x1), vget_high_s32(vacc3x1));
    const int32x2_t vpsum3x2 = vadd_s32(vget_low_s32(vacc3x2), vget_high_s32(vacc3x2));
    const int32x2_t vpsum3x3 = vadd_s32(vget_low_s32(vacc3x3), vget_high_s32(vacc3x3));
    const int32x2_t vsum3x01 = vpadd_s32(vpsum3x0, vpsum3x1);
    const int32x2_t vsum3x23 = vpadd_s32(vpsum3x2, vpsum3x3);
    int32x4_t vacc3x0123 = vcombine_s32(vsum3x01, vsum3x23 );
    const int32x2_t vpsum3x4 = vadd_s32(vget_low_s32(vacc3x4), vget_high_s32(vacc3x4));
    const int32x2_t vpsum3x5 = vadd_s32(vget_low_s32(vacc3x5), vget_high_s32(vacc3x5));
    const int32x2_t vpsum3x6 = vadd_s32(vget_low_s32(vacc3x6), vget_high_s32(vacc3x6));
    const int32x2_t vpsum3x7 = vadd_s32(vget_low_s32(vacc3x7), vget_high_s32(vacc3x7));
    const int32x2_t vsum3x45 = vpadd_s32(vpsum3x4, vpsum3x5);
    const int32x2_t vsum3x67 = vpadd_s32(vpsum3x6, vpsum3x7);
    int32x4_t vacc3x4567 = vcombine_s32(vsum3x45, vsum3x67 );
#endif

    // rndnu requantization: saturating pre-shift, doubling high multiply,
    // rounding post-shift.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vqshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vqshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vqshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vqshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vqshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vqshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vqshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vqshlq_s32(vacc3x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);

    // Add output zero point in saturating int16, then narrow to int8.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    int16x8_t vacc0x01234567 = vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567);
    int16x8_t vacc1x01234567 = vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567);
    int16x8_t vacc2x01234567 = vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567);
    int16x8_t vacc3x01234567 = vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567);

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
#else
    int16x8_t vacc0x01234567 = vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567));
    int16x8_t vacc1x01234567 = vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567));
    int16x8_t vacc2x01234567 = vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567));
    int16x8_t vacc3x01234567 = vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567));

    vacc0x01234567 = vqaddq_s16(vacc0x01234567, voutput_zero_point);
    vacc1x01234567 = vqaddq_s16(vacc1x01234567, voutput_zero_point);
    vacc2x01234567 = vqaddq_s16(vacc2x01234567, voutput_zero_point);
    vacc3x01234567 = vqaddq_s16(vacc3x01234567, voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
#endif

    // Clamp to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);

    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store rows in reverse order (c3 first), then
      // rewind the indirection pointer for the next NR tile.
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Remainder tile: store 4, 2, then 1 column(s), rotating the output
      // registers with vext_s8 between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,141
49.992405
114
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-6x8c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) micro-kernel producing a 6x8 tile of int8
// output. Uses the ARM NEON dot-product extension (vdotq_lane_s32) to
// accumulate groups of 4 int8 values per instruction, then requantizes
// with the RNDNU scheme: saturating pre-shift, saturating doubling
// multiply-high, rounding post-shift, zero-point add, and clamping.
//
//   mr        - number of live output rows (1..6); rows beyond mr alias
//               an earlier row pointer so stores are harmless
//   nc        - number of output columns remaining
//   kc        - reduction size in bytes; rounded up to 4 below
//   ks        - bytes of the indirection buffer consumed per output pixel
//               (must be a multiple of 6 pointers)
//   a         - indirection buffer of input-row pointers
//   w         - packed weights: 8 int32 biases, then int8 weights
//   c         - output base pointer; consecutive rows are cm_stride apart
//   zero      - sentinel pointer for padding rows; such rows do NOT get
//               a_offset applied (see the a0..a5 setup below)
//   params    - rndnu_neon requantization constants and output clamps
//
// XNN_OOB_READS: the kernel may read up to 8 bytes where only 4 are
// consumed (remainder handling) — callers must tolerate that.
void xnn_qs8_igemm_minmax_rndnu_ukernel_6x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 6);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (6 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Each dot-product consumes 4 int8 values per lane, so pad kc up to 4.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Set up one output row pointer per tile row; when mr is smaller than 6,
  // the excess pointers alias the previous row so duplicate stores land on
  // already-written (valid) memory.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 6) {
    c5 = c4;
  }

  do {
    // Initialize all accumulators with the packed per-channel biases;
    // every row starts from the same 8 bias values.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next 6 input-row pointers from the indirection buffer.
      // Real rows are rebased by a_offset; the `zero` padding row is not.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      const int8_t* restrict a4 = a[4];
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const int8_t*) ((uintptr_t) a4 + a_offset);
      }
      const int8_t* restrict a5 = a[5];
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const int8_t*) ((uintptr_t) a5 + a_offset);
      }
      a += 6;

      // Inner accumulation loop along the 8 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 6x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
        const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
        const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;

        // Load a 8x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 6x8 * 8x8 --> 6x8.
        // Lane 0 carries activations k+0..k+3, lane 1 carries k+4..k+7.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 6x4 block of activations.
        // vld1_s8 reads 8 bytes but only lane 0 (4 bytes) is consumed;
        // the over-read is permitted by XNN_OOB_READS.
        const int8x8_t va0x01234567 = vld1_s8(a0);
        const int8x8_t va1x01234567 = vld1_s8(a1);
        const int8x8_t va2x01234567 = vld1_s8(a2);
        const int8x8_t va3x01234567 = vld1_s8(a3);
        const int8x8_t va4x01234567 = vld1_s8(a4);
        const int8x8_t va5x01234567 = vld1_s8(a5);

        // Load a 4x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 6x4 * 4x8 --> 6x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
      }
      p -= 6 * sizeof(void*);
    } while (p != 0);

    // RNDNU requantization: pre-shift, doubling multiply-high, rounding
    // post-shift — applied to all 12 int32 accumulators.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8; AArch64 uses the *_high forms to fuse
    // the two halves in one register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567));
#endif
    // Clamp each packed pair of output rows to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store each row's 8 bytes, advance the output
      // pointers, and rewind the indirection buffer for the next column
      // block of the same output pixels.
      vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567));
      vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4-, 2-, then 1-byte pieces per row, shifting
      // the consumed bytes out of the vectors with vextq_s8 between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
16,020
49.539432
130
c
XNNPACK
XNNPACK-master/src/qs8-igemm/gen/qs8-igemm-8x8c4-minmax-rndnu-neondot.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-igemm/c4-neondot.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


// QS8 indirect GEMM (IGEMM) micro-kernel producing an 8x8 tile of int8
// output. Uses the ARM NEON dot-product extension (vdotq_lane_s32) to
// accumulate groups of 4 int8 values per instruction, then requantizes
// with the RNDNU scheme: saturating pre-shift, saturating doubling
// multiply-high, rounding post-shift, zero-point add, and clamping.
//
//   mr        - number of live output rows (1..8); rows beyond mr alias
//               an earlier row pointer so stores are harmless
//   nc        - number of output columns remaining
//   kc        - reduction size in bytes; rounded up to 4 below
//   ks        - bytes of the indirection buffer consumed per output pixel
//               (must be a multiple of 8 pointers)
//   a         - indirection buffer of input-row pointers
//   w         - packed weights: 8 int32 biases, then int8 weights
//   c         - output base pointer; consecutive rows are cm_stride apart
//   zero      - sentinel pointer for padding rows; such rows do NOT get
//               a_offset applied (see the a0..a7 setup below)
//   params    - rndnu_neon requantization constants and output clamps
//
// XNN_OOB_READS: the kernel may read up to 8 bytes where only 4 are
// consumed (remainder handling) — callers must tolerate that.
void xnn_qs8_igemm_minmax_rndnu_ukernel_8x8c4__neondot(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const int8_t** restrict a,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const int8_t* zero,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 8);
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (8 * sizeof(void*)) == 0);
  assert(a_offset % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Each dot-product consumes 4 int8 values per lane, so pad kc up to 4.
  kc = round_up_po2(kc, 4 * sizeof(int8_t));
  // Set up one output row pointer per tile row; when mr is smaller than 8,
  // the excess pointers alias the previous row so duplicate stores land on
  // already-written (valid) memory.
  int8_t* c0 = c;
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    c1 = c0;
  }
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    c2 = c1;
  }
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 4) {
    c3 = c2;
  }
  int8_t* c4 = (int8_t*) ((uintptr_t) c3 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 4) {
    c4 = c3;
  }
  int8_t* c5 = (int8_t*) ((uintptr_t) c4 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 6) {
    c5 = c4;
  }
  int8_t* c6 = (int8_t*) ((uintptr_t) c5 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 6) {
    c6 = c5;
  }
  int8_t* c7 = (int8_t*) ((uintptr_t) c6 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 8) {
    c7 = c6;
  }

  do {
    // Initialize all accumulators with the packed per-channel biases;
    // every row starts from the same 8 bias values.
    int32x4_t vacc0x0123 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc0x4567 = vld1q_s32(w); w = (const int32_t*) w + 4;
    int32x4_t vacc1x0123 = vacc0x0123;
    int32x4_t vacc1x4567 = vacc0x4567;
    int32x4_t vacc2x0123 = vacc0x0123;
    int32x4_t vacc2x4567 = vacc0x4567;
    int32x4_t vacc3x0123 = vacc0x0123;
    int32x4_t vacc3x4567 = vacc0x4567;
    int32x4_t vacc4x0123 = vacc0x0123;
    int32x4_t vacc4x4567 = vacc0x4567;
    int32x4_t vacc5x0123 = vacc0x0123;
    int32x4_t vacc5x4567 = vacc0x4567;
    int32x4_t vacc6x0123 = vacc0x0123;
    int32x4_t vacc6x4567 = vacc0x4567;
    int32x4_t vacc7x0123 = vacc0x0123;
    int32x4_t vacc7x4567 = vacc0x4567;

    size_t p = ks;
    do {
      // Fetch the next 8 input-row pointers from the indirection buffer.
      // Real rows are rebased by a_offset; the `zero` padding row is not.
      const int8_t* restrict a0 = a[0];
      if XNN_UNPREDICTABLE(a0 != zero) {
        a0 = (const int8_t*) ((uintptr_t) a0 + a_offset);
      }
      const int8_t* restrict a1 = a[1];
      if XNN_UNPREDICTABLE(a1 != zero) {
        a1 = (const int8_t*) ((uintptr_t) a1 + a_offset);
      }
      const int8_t* restrict a2 = a[2];
      if XNN_UNPREDICTABLE(a2 != zero) {
        a2 = (const int8_t*) ((uintptr_t) a2 + a_offset);
      }
      const int8_t* restrict a3 = a[3];
      if XNN_UNPREDICTABLE(a3 != zero) {
        a3 = (const int8_t*) ((uintptr_t) a3 + a_offset);
      }
      const int8_t* restrict a4 = a[4];
      if XNN_UNPREDICTABLE(a4 != zero) {
        a4 = (const int8_t*) ((uintptr_t) a4 + a_offset);
      }
      const int8_t* restrict a5 = a[5];
      if XNN_UNPREDICTABLE(a5 != zero) {
        a5 = (const int8_t*) ((uintptr_t) a5 + a_offset);
      }
      const int8_t* restrict a6 = a[6];
      if XNN_UNPREDICTABLE(a6 != zero) {
        a6 = (const int8_t*) ((uintptr_t) a6 + a_offset);
      }
      const int8_t* restrict a7 = a[7];
      if XNN_UNPREDICTABLE(a7 != zero) {
        a7 = (const int8_t*) ((uintptr_t) a7 + a_offset);
      }
      a += 8;

      // Inner accumulation loop along the 8 columns.
      size_t k = kc;
      // 2x partial unrolled loop to load 8 bytes at a time.
      while (k >= 8 * sizeof(int8_t)) {
        // Load a 8x8 block of activations.
        const int8x8_t va0x01234567 = vld1_s8(a0); a0 += 8;
        const int8x8_t va1x01234567 = vld1_s8(a1); a1 += 8;
        const int8x8_t va2x01234567 = vld1_s8(a2); a2 += 8;
        const int8x8_t va3x01234567 = vld1_s8(a3); a3 += 8;
        const int8x8_t va4x01234567 = vld1_s8(a4); a4 += 8;
        const int8x8_t va5x01234567 = vld1_s8(a5); a5 += 8;
        const int8x8_t va6x01234567 = vld1_s8(a6); a6 += 8;
        const int8x8_t va7x01234567 = vld1_s8(a7); a7 += 8;

        // Load a 8x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb4567x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 8x8 * 8x8 --> 8x8.
        // Lane 0 carries activations k+0..k+3, lane 1 carries k+4..k+7.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb4567x0123, va0x01234567, 1);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb4567x4567, va0x01234567, 1);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb4567x0123, va1x01234567, 1);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb4567x4567, va1x01234567, 1);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb4567x0123, va2x01234567, 1);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb4567x4567, va2x01234567, 1);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb4567x0123, va3x01234567, 1);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb4567x4567, va3x01234567, 1);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb4567x0123, va4x01234567, 1);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb4567x4567, va4x01234567, 1);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb4567x0123, va5x01234567, 1);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb4567x4567, va5x01234567, 1);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb4567x0123, va6x01234567, 1);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb4567x4567, va6x01234567, 1);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb4567x0123, va7x01234567, 1);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb4567x4567, va7x01234567, 1);

        k -= 8 * sizeof(int8_t);
      }
      // Handle up to 4 final positions of `k`
      if XNN_UNLIKELY(k != 0) {
        // Load a 8x4 block of activations.
        // vld1_s8 reads 8 bytes but only lane 0 (4 bytes) is consumed;
        // the over-read is permitted by XNN_OOB_READS.
        const int8x8_t va0x01234567 = vld1_s8(a0);
        const int8x8_t va1x01234567 = vld1_s8(a1);
        const int8x8_t va2x01234567 = vld1_s8(a2);
        const int8x8_t va3x01234567 = vld1_s8(a3);
        const int8x8_t va4x01234567 = vld1_s8(a4);
        const int8x8_t va5x01234567 = vld1_s8(a5);
        const int8x8_t va6x01234567 = vld1_s8(a6);
        const int8x8_t va7x01234567 = vld1_s8(a7);

        // Load a 4x8 block of weights.
        const int8x16_t vb0123x0123 = vld1q_s8(w); w = (const int8_t*) w + 16;
        const int8x16_t vb0123x4567 = vld1q_s8(w); w = (const int8_t*) w + 16;

        // Multiply-accumulate: 8x4 * 4x8 --> 8x8.
        vacc0x0123 = vdotq_lane_s32(vacc0x0123, vb0123x0123, va0x01234567, 0);
        vacc0x4567 = vdotq_lane_s32(vacc0x4567, vb0123x4567, va0x01234567, 0);
        vacc1x0123 = vdotq_lane_s32(vacc1x0123, vb0123x0123, va1x01234567, 0);
        vacc1x4567 = vdotq_lane_s32(vacc1x4567, vb0123x4567, va1x01234567, 0);
        vacc2x0123 = vdotq_lane_s32(vacc2x0123, vb0123x0123, va2x01234567, 0);
        vacc2x4567 = vdotq_lane_s32(vacc2x4567, vb0123x4567, va2x01234567, 0);
        vacc3x0123 = vdotq_lane_s32(vacc3x0123, vb0123x0123, va3x01234567, 0);
        vacc3x4567 = vdotq_lane_s32(vacc3x4567, vb0123x4567, va3x01234567, 0);
        vacc4x0123 = vdotq_lane_s32(vacc4x0123, vb0123x0123, va4x01234567, 0);
        vacc4x4567 = vdotq_lane_s32(vacc4x4567, vb0123x4567, va4x01234567, 0);
        vacc5x0123 = vdotq_lane_s32(vacc5x0123, vb0123x0123, va5x01234567, 0);
        vacc5x4567 = vdotq_lane_s32(vacc5x4567, vb0123x4567, va5x01234567, 0);
        vacc6x0123 = vdotq_lane_s32(vacc6x0123, vb0123x0123, va6x01234567, 0);
        vacc6x4567 = vdotq_lane_s32(vacc6x4567, vb0123x4567, va6x01234567, 0);
        vacc7x0123 = vdotq_lane_s32(vacc7x0123, vb0123x0123, va7x01234567, 0);
        vacc7x4567 = vdotq_lane_s32(vacc7x4567, vb0123x4567, va7x01234567, 0);
      }
      p -= 8 * sizeof(void*);
    } while (p != 0);

    // RNDNU requantization: pre-shift, doubling multiply-high, rounding
    // post-shift — applied to all 16 int32 accumulators.
    const int32x4_t vright_pre_shift = vld1q_dup_s32(&params->rndnu_neon.right_pre_shift);
    const int32x4_t vmultiplier = vld1q_dup_s32(&params->rndnu_neon.multiplier);
    const int32x4_t vright_post_shift = vld1q_dup_s32(&params->rndnu_neon.right_post_shift);

    vacc0x0123 = vshlq_s32(vacc0x0123, vright_pre_shift);
    vacc0x4567 = vshlq_s32(vacc0x4567, vright_pre_shift);
    vacc1x0123 = vshlq_s32(vacc1x0123, vright_pre_shift);
    vacc1x4567 = vshlq_s32(vacc1x4567, vright_pre_shift);
    vacc2x0123 = vshlq_s32(vacc2x0123, vright_pre_shift);
    vacc2x4567 = vshlq_s32(vacc2x4567, vright_pre_shift);
    vacc3x0123 = vshlq_s32(vacc3x0123, vright_pre_shift);
    vacc3x4567 = vshlq_s32(vacc3x4567, vright_pre_shift);
    vacc4x0123 = vshlq_s32(vacc4x0123, vright_pre_shift);
    vacc4x4567 = vshlq_s32(vacc4x4567, vright_pre_shift);
    vacc5x0123 = vshlq_s32(vacc5x0123, vright_pre_shift);
    vacc5x4567 = vshlq_s32(vacc5x4567, vright_pre_shift);
    vacc6x0123 = vshlq_s32(vacc6x0123, vright_pre_shift);
    vacc6x4567 = vshlq_s32(vacc6x4567, vright_pre_shift);
    vacc7x0123 = vshlq_s32(vacc7x0123, vright_pre_shift);
    vacc7x4567 = vshlq_s32(vacc7x4567, vright_pre_shift);

    vacc0x0123 = vqdmulhq_s32(vacc0x0123, vmultiplier);
    vacc0x4567 = vqdmulhq_s32(vacc0x4567, vmultiplier);
    vacc1x0123 = vqdmulhq_s32(vacc1x0123, vmultiplier);
    vacc1x4567 = vqdmulhq_s32(vacc1x4567, vmultiplier);
    vacc2x0123 = vqdmulhq_s32(vacc2x0123, vmultiplier);
    vacc2x4567 = vqdmulhq_s32(vacc2x4567, vmultiplier);
    vacc3x0123 = vqdmulhq_s32(vacc3x0123, vmultiplier);
    vacc3x4567 = vqdmulhq_s32(vacc3x4567, vmultiplier);
    vacc4x0123 = vqdmulhq_s32(vacc4x0123, vmultiplier);
    vacc4x4567 = vqdmulhq_s32(vacc4x4567, vmultiplier);
    vacc5x0123 = vqdmulhq_s32(vacc5x0123, vmultiplier);
    vacc5x4567 = vqdmulhq_s32(vacc5x4567, vmultiplier);
    vacc6x0123 = vqdmulhq_s32(vacc6x0123, vmultiplier);
    vacc6x4567 = vqdmulhq_s32(vacc6x4567, vmultiplier);
    vacc7x0123 = vqdmulhq_s32(vacc7x0123, vmultiplier);
    vacc7x4567 = vqdmulhq_s32(vacc7x4567, vmultiplier);

    vacc0x0123 = vrshlq_s32(vacc0x0123, vright_post_shift);
    vacc0x4567 = vrshlq_s32(vacc0x4567, vright_post_shift);
    vacc1x0123 = vrshlq_s32(vacc1x0123, vright_post_shift);
    vacc1x4567 = vrshlq_s32(vacc1x4567, vright_post_shift);
    vacc2x0123 = vrshlq_s32(vacc2x0123, vright_post_shift);
    vacc2x4567 = vrshlq_s32(vacc2x4567, vright_post_shift);
    vacc3x0123 = vrshlq_s32(vacc3x0123, vright_post_shift);
    vacc3x4567 = vrshlq_s32(vacc3x4567, vright_post_shift);
    vacc4x0123 = vrshlq_s32(vacc4x0123, vright_post_shift);
    vacc4x4567 = vrshlq_s32(vacc4x4567, vright_post_shift);
    vacc5x0123 = vrshlq_s32(vacc5x0123, vright_post_shift);
    vacc5x4567 = vrshlq_s32(vacc5x4567, vright_post_shift);
    vacc6x0123 = vrshlq_s32(vacc6x0123, vright_post_shift);
    vacc6x4567 = vrshlq_s32(vacc6x4567, vright_post_shift);
    vacc7x0123 = vrshlq_s32(vacc7x0123, vright_post_shift);
    vacc7x4567 = vrshlq_s32(vacc7x4567, vright_post_shift);

    // Narrow int32 -> int16 with saturation, add the output zero point,
    // then narrow int16 -> int8; AArch64 uses the *_high forms to fuse
    // the two halves in one register.
    const int16x8_t voutput_zero_point = vld1q_dup_s16(&params->rndnu_neon.output_zero_point);
#if XNN_ARCH_ARM64
    const int16x8_t vacc0x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc0x0123), vacc0x4567), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc1x0123), vacc1x4567), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc2x0123), vacc2x4567), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc3x0123), vacc3x4567), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc4x0123), vacc4x4567), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc5x0123), vacc5x4567), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc6x0123), vacc6x4567), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vqmovn_high_s32(vqmovn_s32(vacc7x0123), vacc7x4567), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vqmovn_high_s16(vqmovn_s16(vacc0x01234567), vacc1x01234567);
    int8x16_t vout2x01234567_3x01234567 = vqmovn_high_s16(vqmovn_s16(vacc2x01234567), vacc3x01234567);
    int8x16_t vout4x01234567_5x01234567 = vqmovn_high_s16(vqmovn_s16(vacc4x01234567), vacc5x01234567);
    int8x16_t vout6x01234567_7x01234567 = vqmovn_high_s16(vqmovn_s16(vacc6x01234567), vacc7x01234567);
#else
    const int16x8_t vacc0x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc0x0123), vqmovn_s32(vacc0x4567)), voutput_zero_point);
    const int16x8_t vacc1x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc1x0123), vqmovn_s32(vacc1x4567)), voutput_zero_point);
    const int16x8_t vacc2x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc2x0123), vqmovn_s32(vacc2x4567)), voutput_zero_point);
    const int16x8_t vacc3x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc3x0123), vqmovn_s32(vacc3x4567)), voutput_zero_point);
    const int16x8_t vacc4x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc4x0123), vqmovn_s32(vacc4x4567)), voutput_zero_point);
    const int16x8_t vacc5x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc5x0123), vqmovn_s32(vacc5x4567)), voutput_zero_point);
    const int16x8_t vacc6x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc6x0123), vqmovn_s32(vacc6x4567)), voutput_zero_point);
    const int16x8_t vacc7x01234567 = vqaddq_s16(vcombine_s16(vqmovn_s32(vacc7x0123), vqmovn_s32(vacc7x4567)), voutput_zero_point);

    int8x16_t vout0x01234567_1x01234567 = vcombine_s8(vqmovn_s16(vacc0x01234567), vqmovn_s16(vacc1x01234567));
    int8x16_t vout2x01234567_3x01234567 = vcombine_s8(vqmovn_s16(vacc2x01234567), vqmovn_s16(vacc3x01234567));
    int8x16_t vout4x01234567_5x01234567 = vcombine_s8(vqmovn_s16(vacc4x01234567), vqmovn_s16(vacc5x01234567));
    int8x16_t vout6x01234567_7x01234567 = vcombine_s8(vqmovn_s16(vacc6x01234567), vqmovn_s16(vacc7x01234567));
#endif
    // Clamp each packed pair of output rows to [output_min, output_max].
    const int8x16_t voutput_min = vld1q_dup_s8(&params->rndnu_neon.output_min);
    const int8x16_t voutput_max = vld1q_dup_s8(&params->rndnu_neon.output_max);

    vout6x01234567_7x01234567 = vmaxq_s8(vout6x01234567_7x01234567, voutput_min);
    vout4x01234567_5x01234567 = vmaxq_s8(vout4x01234567_5x01234567, voutput_min);
    vout2x01234567_3x01234567 = vmaxq_s8(vout2x01234567_3x01234567, voutput_min);
    vout0x01234567_1x01234567 = vmaxq_s8(vout0x01234567_1x01234567, voutput_min);

    vout6x01234567_7x01234567 = vminq_s8(vout6x01234567_7x01234567, voutput_max);
    vout4x01234567_5x01234567 = vminq_s8(vout4x01234567_5x01234567, voutput_max);
    vout2x01234567_3x01234567 = vminq_s8(vout2x01234567_3x01234567, voutput_max);
    vout0x01234567_1x01234567 = vminq_s8(vout0x01234567_1x01234567, voutput_max);

    if (nc >= 8) {
      // Full 8-column tile: store each row's 8 bytes, advance the output
      // pointers, and rewind the indirection buffer for the next column
      // block of the same output pixels.
      vst1_s8(c7 + 0, vget_high_s8(vout6x01234567_7x01234567));
      vst1_s8(c6 + 0, vget_low_s8(vout6x01234567_7x01234567));
      vst1_s8(c5 + 0, vget_high_s8(vout4x01234567_5x01234567));
      vst1_s8(c4 + 0, vget_low_s8(vout4x01234567_5x01234567));
      vst1_s8(c3 + 0, vget_high_s8(vout2x01234567_3x01234567));
      vst1_s8(c2 + 0, vget_low_s8(vout2x01234567_3x01234567));
      vst1_s8(c1 + 0, vget_high_s8(vout0x01234567_1x01234567));
      vst1_s8(c0 + 0, vget_low_s8(vout0x01234567_1x01234567));

      c7 = (int8_t*) ((uintptr_t) c7 + cn_stride);
      c6 = (int8_t*) ((uintptr_t) c6 + cn_stride);
      c5 = (int8_t*) ((uintptr_t) c5 + cn_stride);
      c4 = (int8_t*) ((uintptr_t) c4 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);

      a = (const int8_t**restrict) ((uintptr_t) a - ks);

      nc -= 8;
    } else {
      // Partial tile: store 4-, 2-, then 1-byte pieces per row, shifting
      // the consumed bytes out of the vectors with vextq_s8 between steps.
      if (nc & 4) {
        vst1q_lane_u32((void*) c7, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 2); c7 += 4;
        vst1q_lane_u32((void*) c6, vreinterpretq_u32_s8(vout6x01234567_7x01234567), 0); c6 += 4;
        vst1q_lane_u32((void*) c5, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 2); c5 += 4;
        vst1q_lane_u32((void*) c4, vreinterpretq_u32_s8(vout4x01234567_5x01234567), 0); c4 += 4;
        vst1q_lane_u32((void*) c3, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 2); c3 += 4;
        vst1q_lane_u32((void*) c2, vreinterpretq_u32_s8(vout2x01234567_3x01234567), 0); c2 += 4;
        vst1q_lane_u32((void*) c1, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 2); c1 += 4;
        vst1q_lane_u32((void*) c0, vreinterpretq_u32_s8(vout0x01234567_1x01234567), 0); c0 += 4;
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 4);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 4);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 4);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 4);
      }
      if (nc & 2) {
        vst1q_lane_u16((void*) c7, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 4); c7 += 2;
        vst1q_lane_u16((void*) c6, vreinterpretq_u16_s8(vout6x01234567_7x01234567), 0); c6 += 2;
        vst1q_lane_u16((void*) c5, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 4); c5 += 2;
        vst1q_lane_u16((void*) c4, vreinterpretq_u16_s8(vout4x01234567_5x01234567), 0); c4 += 2;
        vst1q_lane_u16((void*) c3, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 4); c3 += 2;
        vst1q_lane_u16((void*) c2, vreinterpretq_u16_s8(vout2x01234567_3x01234567), 0); c2 += 2;
        vst1q_lane_u16((void*) c1, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 4); c1 += 2;
        vst1q_lane_u16((void*) c0, vreinterpretq_u16_s8(vout0x01234567_1x01234567), 0); c0 += 2;
        vout6x01234567_7x01234567 = vextq_s8(vout6x01234567_7x01234567, vout6x01234567_7x01234567, 2);
        vout4x01234567_5x01234567 = vextq_s8(vout4x01234567_5x01234567, vout4x01234567_5x01234567, 2);
        vout2x01234567_3x01234567 = vextq_s8(vout2x01234567_3x01234567, vout2x01234567_3x01234567, 2);
        vout0x01234567_1x01234567 = vextq_s8(vout0x01234567_1x01234567, vout0x01234567_1x01234567, 2);
      }
      if (nc & 1) {
        vst1q_lane_s8(c7, vout6x01234567_7x01234567, 8);
        vst1q_lane_s8(c6, vout6x01234567_7x01234567, 0);
        vst1q_lane_s8(c5, vout4x01234567_5x01234567, 8);
        vst1q_lane_s8(c4, vout4x01234567_5x01234567, 0);
        vst1q_lane_s8(c3, vout2x01234567_3x01234567, 8);
        vst1q_lane_s8(c2, vout2x01234567_3x01234567, 0);
        vst1q_lane_s8(c1, vout0x01234567_1x01234567, 8);
        vst1q_lane_s8(c0, vout0x01234567_1x01234567, 0);
      }

      nc = 0;
    }
  } while (nc != 0);
}
20,368
51.906494
130
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p1c-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 depthwise convolution microkernel with per-channel (QC8) weight
// quantization: 25 taps, single pass ("unipass"), 1 channel per inner-loop
// iteration, scalar implementation.  Requantization uses the fp32 "fmagic"
// scheme: the accumulator is scaled in float, clamped in the float domain,
// then converted float->int by adding a magic bias constant and
// reinterpreting the float bit pattern as an integer.
//
//   channels         - number of channels to process per output pixel (> 0)
//   output_width     - number of output pixels (> 0)
//   input            - indirection buffer: 25 input pointers per output
//                      pixel; advanced by input_stride bytes per pixel
//   weights          - packed weights; per channel the pointer arithmetic
//                      below implies the layout {int32 bias, 25 x int8 taps,
//                      float requantization scale}
//   output           - int8 output; advanced by output_increment bytes after
//                      each pixel's channels are written
//   input_offset     - byte offset applied to each input pointer that is not
//                      the `zero` pointer
//   zero             - sentinel pointer (zero buffer); pointers equal to it
//                      are used as-is, without input_offset
//   params           - fp32_scalar_fmagic requantization constants
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__scalar_fmagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Hoisted requantization constants (clamping is done in the float domain,
  // relative to the output zero point).
  const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point;
  const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias;
  const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point;
  do {
    // Fetch the 25 tap pointers for this output pixel from the indirection
    // buffer; every pointer that is not the zero buffer is rebased by
    // input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    // Advance the indirection buffer to the next output pixel's 25 entries.
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel accumulation: int32 bias followed by 25 int8 x int8
      // multiply-accumulates in int32.  The taps live immediately after the
      // bias word, hence the `w + sizeof(int32_t)` indexing.
      int32_t vacc = unaligned_load_s32(w);

      const int32_t vi0 = (int32_t) *i0++;
      const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1++;
      const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2++;
      const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3++;
      const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4++;
      const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5++;
      const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6++;
      const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7++;
      const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8++;
      const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
      vacc += vi8 * vk8;
      const int32_t vi9 = (int32_t) *i9++;
      const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
      vacc += vi9 * vk9;
      const int32_t vi10 = (int32_t) *i10++;
      const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
      vacc += vi10 * vk10;
      const int32_t vi11 = (int32_t) *i11++;
      const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
      vacc += vi11 * vk11;
      const int32_t vi12 = (int32_t) *i12++;
      const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
      vacc += vi12 * vk12;
      const int32_t vi13 = (int32_t) *i13++;
      const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
      vacc += vi13 * vk13;
      const int32_t vi14 = (int32_t) *i14++;
      const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
      vacc += vi14 * vk14;
      const int32_t vi15 = (int32_t) *i15++;
      const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
      vacc += vi15 * vk15;
      const int32_t vi16 = (int32_t) *i16++;
      const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
      vacc += vi16 * vk16;
      const int32_t vi17 = (int32_t) *i17++;
      const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
      vacc += vi17 * vk17;
      const int32_t vi18 = (int32_t) *i18++;
      const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
      vacc += vi18 * vk18;
      const int32_t vi19 = (int32_t) *i19++;
      const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
      vacc += vi19 * vk19;
      const int32_t vi20 = (int32_t) *i20++;
      const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
      vacc += vi20 * vk20;
      const int32_t vi21 = (int32_t) *i21++;
      const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
      vacc += vi21 * vk21;
      const int32_t vi22 = (int32_t) *i22++;
      const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
      vacc += vi22 * vk22;
      const int32_t vi23 = (int32_t) *i23++;
      const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
      vacc += vi23 * vk23;
      const int32_t vi24 = (int32_t) *i24++;
      const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
      vacc += vi24 * vk24;

      // Skip past this channel's bias + 25 taps; the per-channel float scale
      // follows immediately (QC8 = per-channel quantization).
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));

      const float vscale = unaligned_load_f32(w);
      w = (const void*) ((const float*) w + 1);
      float vfpacc = (float) vacc * vscale;

      // fmagic requantization: clamp in the float domain, add the magic bias
      // so the integer result lands in the low bits of the float encoding,
      // then reinterpret the bits and subtract (magic bias - zero point).
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
10,002
37.179389
114
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p1c-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 depthwise convolution microkernel with per-channel (QC8) weight
// quantization: 25 taps, single pass ("unipass"), 1 channel per inner-loop
// iteration, scalar implementation.  Requantization uses the fp32 "imagic"
// scheme: the scaled accumulator is converted float->int via the magic-bias
// bit trick, and the min/max clamping is performed in the *integer* domain
// (contrast with the fmagic variant, which clamps in the float domain).
//
//   channels         - number of channels to process per output pixel (> 0)
//   output_width     - number of output pixels (> 0)
//   input            - indirection buffer: 25 input pointers per output
//                      pixel; advanced by input_stride bytes per pixel
//   weights          - packed weights; per channel the pointer arithmetic
//                      below implies the layout {int32 bias, 25 x int8 taps,
//                      float requantization scale}
//   output           - int8 output; advanced by output_increment bytes after
//                      each pixel's channels are written
//   input_offset     - byte offset applied to each input pointer that is not
//                      the `zero` pointer
//   zero             - sentinel pointer (zero buffer); pointers equal to it
//                      are used as-is, without input_offset
//   params           - fp32_scalar_imagic requantization constants
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Hoisted requantization constants; vmagic_min/vmagic_max are integer-domain
  // clamp bounds (already biased by the magic constant).
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
  do {
    // Fetch the 25 tap pointers for this output pixel from the indirection
    // buffer; every pointer that is not the zero buffer is rebased by
    // input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    // Advance the indirection buffer to the next output pixel's 25 entries.
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel accumulation: int32 bias followed by 25 int8 x int8
      // multiply-accumulates in int32.  The taps live immediately after the
      // bias word, hence the `w + sizeof(int32_t)` indexing.
      int32_t vacc = unaligned_load_s32(w);

      const int32_t vi0 = (int32_t) *i0++;
      const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1++;
      const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2++;
      const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3++;
      const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4++;
      const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5++;
      const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6++;
      const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7++;
      const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8++;
      const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
      vacc += vi8 * vk8;
      const int32_t vi9 = (int32_t) *i9++;
      const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
      vacc += vi9 * vk9;
      const int32_t vi10 = (int32_t) *i10++;
      const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
      vacc += vi10 * vk10;
      const int32_t vi11 = (int32_t) *i11++;
      const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
      vacc += vi11 * vk11;
      const int32_t vi12 = (int32_t) *i12++;
      const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
      vacc += vi12 * vk12;
      const int32_t vi13 = (int32_t) *i13++;
      const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
      vacc += vi13 * vk13;
      const int32_t vi14 = (int32_t) *i14++;
      const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
      vacc += vi14 * vk14;
      const int32_t vi15 = (int32_t) *i15++;
      const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
      vacc += vi15 * vk15;
      const int32_t vi16 = (int32_t) *i16++;
      const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
      vacc += vi16 * vk16;
      const int32_t vi17 = (int32_t) *i17++;
      const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
      vacc += vi17 * vk17;
      const int32_t vi18 = (int32_t) *i18++;
      const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
      vacc += vi18 * vk18;
      const int32_t vi19 = (int32_t) *i19++;
      const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
      vacc += vi19 * vk19;
      const int32_t vi20 = (int32_t) *i20++;
      const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
      vacc += vi20 * vk20;
      const int32_t vi21 = (int32_t) *i21++;
      const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
      vacc += vi21 * vk21;
      const int32_t vi22 = (int32_t) *i22++;
      const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
      vacc += vi22 * vk22;
      const int32_t vi23 = (int32_t) *i23++;
      const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
      vacc += vi23 * vk23;
      const int32_t vi24 = (int32_t) *i24++;
      const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
      vacc += vi24 * vk24;

      // Skip past this channel's bias + 25 taps; the per-channel float scale
      // follows immediately (QC8 = per-channel quantization).
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));

      const float vscale = unaligned_load_f32(w);
      w = (const void*) ((const float*) w + 1);
      float vfpacc = (float) vacc * vscale;

      // imagic requantization: add the magic bias, reinterpret the float bits
      // as an integer, clamp in the integer domain, then remove the
      // (magic bias - zero point) offset.
      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
9,888
36.60076
100
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p1c-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>

// QS8 depthwise convolution microkernel with per-channel (QC8) weight
// quantization: 25 taps, single pass ("unipass"), 1 channel per inner-loop
// iteration, scalar implementation.  Requantization uses the fp32 "lrintf"
// scheme: clamp the scaled accumulator in the float domain, round it to an
// integer with lrintf(), then add the output zero point.
//
//   channels         - number of channels to process per output pixel (> 0)
//   output_width     - number of output pixels (> 0)
//   input            - indirection buffer: 25 input pointers per output
//                      pixel; advanced by input_stride bytes per pixel
//   weights          - packed weights; per channel the pointer arithmetic
//                      below implies the layout {int32 bias, 25 x int8 taps,
//                      float requantization scale}
//   output           - int8 output; advanced by output_increment bytes after
//                      each pixel's channels are written
//   input_offset     - byte offset applied to each input pointer that is not
//                      the `zero` pointer
//   zero             - sentinel pointer (zero buffer); pointers equal to it
//                      are used as-is, without input_offset
//   params           - fp32_scalar_lrintf requantization constants
void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__scalar_lrintf(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    intptr_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

  // Hoisted requantization constants (clamping is done in the float domain,
  // relative to the output zero point).
  const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point;
  const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point;
  const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point;
  do {
    // Fetch the 25 tap pointers for this output pixel from the indirection
    // buffer; every pointer that is not the zero buffer is rebased by
    // input_offset.
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    const int8_t* i3 = input[3];
    assert(i3 != NULL);
    if XNN_UNPREDICTABLE(i3 != zero) {
      i3 = (const int8_t*) ((uintptr_t) i3 + input_offset);
    }
    const int8_t* i4 = input[4];
    assert(i4 != NULL);
    if XNN_UNPREDICTABLE(i4 != zero) {
      i4 = (const int8_t*) ((uintptr_t) i4 + input_offset);
    }
    const int8_t* i5 = input[5];
    assert(i5 != NULL);
    if XNN_UNPREDICTABLE(i5 != zero) {
      i5 = (const int8_t*) ((uintptr_t) i5 + input_offset);
    }
    const int8_t* i6 = input[6];
    assert(i6 != NULL);
    if XNN_UNPREDICTABLE(i6 != zero) {
      i6 = (const int8_t*) ((uintptr_t) i6 + input_offset);
    }
    const int8_t* i7 = input[7];
    assert(i7 != NULL);
    if XNN_UNPREDICTABLE(i7 != zero) {
      i7 = (const int8_t*) ((uintptr_t) i7 + input_offset);
    }
    const int8_t* i8 = input[8];
    assert(i8 != NULL);
    if XNN_UNPREDICTABLE(i8 != zero) {
      i8 = (const int8_t*) ((uintptr_t) i8 + input_offset);
    }
    const int8_t* i9 = input[9];
    assert(i9 != NULL);
    if XNN_UNPREDICTABLE(i9 != zero) {
      i9 = (const int8_t*) ((uintptr_t) i9 + input_offset);
    }
    const int8_t* i10 = input[10];
    assert(i10 != NULL);
    if XNN_UNPREDICTABLE(i10 != zero) {
      i10 = (const int8_t*) ((uintptr_t) i10 + input_offset);
    }
    const int8_t* i11 = input[11];
    assert(i11 != NULL);
    if XNN_UNPREDICTABLE(i11 != zero) {
      i11 = (const int8_t*) ((uintptr_t) i11 + input_offset);
    }
    const int8_t* i12 = input[12];
    assert(i12 != NULL);
    if XNN_UNPREDICTABLE(i12 != zero) {
      i12 = (const int8_t*) ((uintptr_t) i12 + input_offset);
    }
    const int8_t* i13 = input[13];
    assert(i13 != NULL);
    if XNN_UNPREDICTABLE(i13 != zero) {
      i13 = (const int8_t*) ((uintptr_t) i13 + input_offset);
    }
    const int8_t* i14 = input[14];
    assert(i14 != NULL);
    if XNN_UNPREDICTABLE(i14 != zero) {
      i14 = (const int8_t*) ((uintptr_t) i14 + input_offset);
    }
    const int8_t* i15 = input[15];
    assert(i15 != NULL);
    if XNN_UNPREDICTABLE(i15 != zero) {
      i15 = (const int8_t*) ((uintptr_t) i15 + input_offset);
    }
    const int8_t* i16 = input[16];
    assert(i16 != NULL);
    if XNN_UNPREDICTABLE(i16 != zero) {
      i16 = (const int8_t*) ((uintptr_t) i16 + input_offset);
    }
    const int8_t* i17 = input[17];
    assert(i17 != NULL);
    if XNN_UNPREDICTABLE(i17 != zero) {
      i17 = (const int8_t*) ((uintptr_t) i17 + input_offset);
    }
    const int8_t* i18 = input[18];
    assert(i18 != NULL);
    if XNN_UNPREDICTABLE(i18 != zero) {
      i18 = (const int8_t*) ((uintptr_t) i18 + input_offset);
    }
    const int8_t* i19 = input[19];
    assert(i19 != NULL);
    if XNN_UNPREDICTABLE(i19 != zero) {
      i19 = (const int8_t*) ((uintptr_t) i19 + input_offset);
    }
    const int8_t* i20 = input[20];
    assert(i20 != NULL);
    if XNN_UNPREDICTABLE(i20 != zero) {
      i20 = (const int8_t*) ((uintptr_t) i20 + input_offset);
    }
    const int8_t* i21 = input[21];
    assert(i21 != NULL);
    if XNN_UNPREDICTABLE(i21 != zero) {
      i21 = (const int8_t*) ((uintptr_t) i21 + input_offset);
    }
    const int8_t* i22 = input[22];
    assert(i22 != NULL);
    if XNN_UNPREDICTABLE(i22 != zero) {
      i22 = (const int8_t*) ((uintptr_t) i22 + input_offset);
    }
    const int8_t* i23 = input[23];
    assert(i23 != NULL);
    if XNN_UNPREDICTABLE(i23 != zero) {
      i23 = (const int8_t*) ((uintptr_t) i23 + input_offset);
    }
    const int8_t* i24 = input[24];
    assert(i24 != NULL);
    if XNN_UNPREDICTABLE(i24 != zero) {
      i24 = (const int8_t*) ((uintptr_t) i24 + input_offset);
    }
    // Advance the indirection buffer to the next output pixel's 25 entries.
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
    do {
      // Per-channel accumulation: int32 bias followed by 25 int8 x int8
      // multiply-accumulates in int32.  The taps live immediately after the
      // bias word, hence the `w + sizeof(int32_t)` indexing.
      int32_t vacc = unaligned_load_s32(w);

      const int32_t vi0 = (int32_t) *i0++;
      const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1++;
      const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2++;
      const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2];
      vacc += vi2 * vk2;
      const int32_t vi3 = (int32_t) *i3++;
      const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3];
      vacc += vi3 * vk3;
      const int32_t vi4 = (int32_t) *i4++;
      const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4];
      vacc += vi4 * vk4;
      const int32_t vi5 = (int32_t) *i5++;
      const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5];
      vacc += vi5 * vk5;
      const int32_t vi6 = (int32_t) *i6++;
      const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6];
      vacc += vi6 * vk6;
      const int32_t vi7 = (int32_t) *i7++;
      const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7];
      vacc += vi7 * vk7;
      const int32_t vi8 = (int32_t) *i8++;
      const int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8];
      vacc += vi8 * vk8;
      const int32_t vi9 = (int32_t) *i9++;
      const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9];
      vacc += vi9 * vk9;
      const int32_t vi10 = (int32_t) *i10++;
      const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10];
      vacc += vi10 * vk10;
      const int32_t vi11 = (int32_t) *i11++;
      const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11];
      vacc += vi11 * vk11;
      const int32_t vi12 = (int32_t) *i12++;
      const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12];
      vacc += vi12 * vk12;
      const int32_t vi13 = (int32_t) *i13++;
      const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13];
      vacc += vi13 * vk13;
      const int32_t vi14 = (int32_t) *i14++;
      const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14];
      vacc += vi14 * vk14;
      const int32_t vi15 = (int32_t) *i15++;
      const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15];
      vacc += vi15 * vk15;
      const int32_t vi16 = (int32_t) *i16++;
      const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16];
      vacc += vi16 * vk16;
      const int32_t vi17 = (int32_t) *i17++;
      const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17];
      vacc += vi17 * vk17;
      const int32_t vi18 = (int32_t) *i18++;
      const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18];
      vacc += vi18 * vk18;
      const int32_t vi19 = (int32_t) *i19++;
      const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19];
      vacc += vi19 * vk19;
      const int32_t vi20 = (int32_t) *i20++;
      const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20];
      vacc += vi20 * vk20;
      const int32_t vi21 = (int32_t) *i21++;
      const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21];
      vacc += vi21 * vk21;
      const int32_t vi22 = (int32_t) *i22++;
      const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22];
      vacc += vi22 * vk22;
      const int32_t vi23 = (int32_t) *i23++;
      const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23];
      vacc += vi23 * vk23;
      const int32_t vi24 = (int32_t) *i24++;
      const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24];
      vacc += vi24 * vk24;

      // Skip past this channel's bias + 25 taps; the per-channel float scale
      // follows immediately (QC8 = per-channel quantization).
      w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t));

      const float vscale = unaligned_load_f32(w);
      w = (const void*) ((const float*) w + 1);
      float vfpacc = (float) vacc * vscale;

      // lrintf requantization: clamp in the float domain, round via lrintf(),
      // then re-add the output zero point.
      vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point);
      vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point);
      const int32_t vrndacc = (int32_t) lrintf(vfpacc);
      int32_t vout = vrndacc + voutput_zero_point;

      *output++ = (int8_t) vout;
    } while (--c != 0);

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}
9,906
36.812977
98
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p1c-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p1c__wasm_fmagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } const int8_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const int8_t*) ((uintptr_t) i3 + input_offset); } const int8_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const int8_t*) ((uintptr_t) i4 + input_offset); } const int8_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const int8_t*) ((uintptr_t) i5 + input_offset); } const int8_t* 
i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const int8_t*) ((uintptr_t) i6 + input_offset); } const int8_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const int8_t*) ((uintptr_t) i7 + input_offset); } const int8_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const int8_t*) ((uintptr_t) i8 + input_offset); } const int8_t* i9 = input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != zero) { i9 = (const int8_t*) ((uintptr_t) i9 + input_offset); } const int8_t* i10 = input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != zero) { i10 = (const int8_t*) ((uintptr_t) i10 + input_offset); } const int8_t* i11 = input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != zero) { i11 = (const int8_t*) ((uintptr_t) i11 + input_offset); } const int8_t* i12 = input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != zero) { i12 = (const int8_t*) ((uintptr_t) i12 + input_offset); } const int8_t* i13 = input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != zero) { i13 = (const int8_t*) ((uintptr_t) i13 + input_offset); } const int8_t* i14 = input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != zero) { i14 = (const int8_t*) ((uintptr_t) i14 + input_offset); } const int8_t* i15 = input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != zero) { i15 = (const int8_t*) ((uintptr_t) i15 + input_offset); } const int8_t* i16 = input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != zero) { i16 = (const int8_t*) ((uintptr_t) i16 + input_offset); } const int8_t* i17 = input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != zero) { i17 = (const int8_t*) ((uintptr_t) i17 + input_offset); } const int8_t* i18 = input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != zero) { i18 = (const int8_t*) ((uintptr_t) i18 + input_offset); } const int8_t* i19 = input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != zero) { i19 = (const int8_t*) ((uintptr_t) i19 + 
input_offset); } const int8_t* i20 = input[20]; assert(i20 != NULL); if XNN_UNPREDICTABLE(i20 != zero) { i20 = (const int8_t*) ((uintptr_t) i20 + input_offset); } const int8_t* i21 = input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != zero) { i21 = (const int8_t*) ((uintptr_t) i21 + input_offset); } const int8_t* i22 = input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != zero) { i22 = (const int8_t*) ((uintptr_t) i22 + input_offset); } const int8_t* i23 = input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != zero) { i23 = (const int8_t*) ((uintptr_t) i23 + input_offset); } const int8_t* i24 = input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != zero) { i24 = (const int8_t*) ((uintptr_t) i24 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; do { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0++; const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1++; const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2++; const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2]; vacc += vi2 * vk2; const int32_t vi3 = (int32_t) *i3++; const int32_t vk3 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[3]; vacc += vi3 * vk3; const int32_t vi4 = (int32_t) *i4++; const int32_t vk4 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[4]; vacc += vi4 * vk4; const int32_t vi5 = (int32_t) *i5++; const int32_t vk5 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[5]; vacc += vi5 * vk5; const int32_t vi6 = (int32_t) *i6++; const int32_t vk6 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[6]; vacc += vi6 * vk6; const int32_t vi7 = (int32_t) *i7++; const int32_t vk7 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[7]; vacc += vi7 * vk7; const int32_t vi8 = (int32_t) *i8++; const 
int32_t vk8 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[8]; vacc += vi8 * vk8; const int32_t vi9 = (int32_t) *i9++; const int32_t vk9 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[9]; vacc += vi9 * vk9; const int32_t vi10 = (int32_t) *i10++; const int32_t vk10 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[10]; vacc += vi10 * vk10; const int32_t vi11 = (int32_t) *i11++; const int32_t vk11 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[11]; vacc += vi11 * vk11; const int32_t vi12 = (int32_t) *i12++; const int32_t vk12 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[12]; vacc += vi12 * vk12; const int32_t vi13 = (int32_t) *i13++; const int32_t vk13 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[13]; vacc += vi13 * vk13; const int32_t vi14 = (int32_t) *i14++; const int32_t vk14 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[14]; vacc += vi14 * vk14; const int32_t vi15 = (int32_t) *i15++; const int32_t vk15 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[15]; vacc += vi15 * vk15; const int32_t vi16 = (int32_t) *i16++; const int32_t vk16 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[16]; vacc += vi16 * vk16; const int32_t vi17 = (int32_t) *i17++; const int32_t vk17 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[17]; vacc += vi17 * vk17; const int32_t vi18 = (int32_t) *i18++; const int32_t vk18 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[18]; vacc += vi18 * vk18; const int32_t vi19 = (int32_t) *i19++; const int32_t vk19 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[19]; vacc += vi19 * vk19; const int32_t vi20 = (int32_t) *i20++; const int32_t vk20 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[20]; vacc += vi20 * vk20; const int32_t vi21 = (int32_t) *i21++; const int32_t vk21 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[21]; vacc += vi21 * vk21; const int32_t vi22 = (int32_t) *i22++; const int32_t vk22 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[22]; vacc 
+= vi22 * vk22; const int32_t vi23 = (int32_t) *i23++; const int32_t vk23 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[23]; vacc += vi23 * vk23; const int32_t vi24 = (int32_t) *i24++; const int32_t vk24 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[24]; vacc += vi24 * vk24; w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 25 * sizeof(int8_t)); const float vscale = unaligned_load_f32(w); w = (const void*) ((const float*) w + 1); float vfpacc = (float) vacc * vscale; vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point); vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point); vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point; *output++ = (int8_t) vout; } while (--c != 0); output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
10,020
37.248092
114
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p2c-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p2c__scalar_fmagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } const int8_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const int8_t*) ((uintptr_t) i3 + input_offset); } const int8_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const int8_t*) ((uintptr_t) i4 + input_offset); } const int8_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const int8_t*) ((uintptr_t) i5 + input_offset); } const 
int8_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const int8_t*) ((uintptr_t) i6 + input_offset); } const int8_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const int8_t*) ((uintptr_t) i7 + input_offset); } const int8_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const int8_t*) ((uintptr_t) i8 + input_offset); } const int8_t* i9 = input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != zero) { i9 = (const int8_t*) ((uintptr_t) i9 + input_offset); } const int8_t* i10 = input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != zero) { i10 = (const int8_t*) ((uintptr_t) i10 + input_offset); } const int8_t* i11 = input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != zero) { i11 = (const int8_t*) ((uintptr_t) i11 + input_offset); } const int8_t* i12 = input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != zero) { i12 = (const int8_t*) ((uintptr_t) i12 + input_offset); } const int8_t* i13 = input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != zero) { i13 = (const int8_t*) ((uintptr_t) i13 + input_offset); } const int8_t* i14 = input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != zero) { i14 = (const int8_t*) ((uintptr_t) i14 + input_offset); } const int8_t* i15 = input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != zero) { i15 = (const int8_t*) ((uintptr_t) i15 + input_offset); } const int8_t* i16 = input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != zero) { i16 = (const int8_t*) ((uintptr_t) i16 + input_offset); } const int8_t* i17 = input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != zero) { i17 = (const int8_t*) ((uintptr_t) i17 + input_offset); } const int8_t* i18 = input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != zero) { i18 = (const int8_t*) ((uintptr_t) i18 + input_offset); } const int8_t* i19 = input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != zero) { i19 = (const int8_t*) ((uintptr_t) i19 + 
input_offset); } const int8_t* i20 = input[20]; assert(i20 != NULL); if XNN_UNPREDICTABLE(i20 != zero) { i20 = (const int8_t*) ((uintptr_t) i20 + input_offset); } const int8_t* i21 = input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != zero) { i21 = (const int8_t*) ((uintptr_t) i21 + input_offset); } const int8_t* i22 = input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != zero) { i22 = (const int8_t*) ((uintptr_t) i22 + input_offset); } const int8_t* i23 = input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != zero) { i23 = (const int8_t*) ((uintptr_t) i23 + input_offset); } const int8_t* i24 = input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != zero) { i24 = (const int8_t*) ((uintptr_t) i24 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; const int32_t vi3x0 = (int32_t) i3[0]; const int32_t vi3x1 = 
(int32_t) i3[1]; i3 += 2; const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7]; vacc0 += vi3x0 * vk3x0; vacc1 += vi3x1 * vk3x1; const int32_t vi4x0 = (int32_t) i4[0]; const int32_t vi4x1 = (int32_t) i4[1]; i4 += 2; const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9]; vacc0 += vi4x0 * vk4x0; vacc1 += vi4x1 * vk4x1; const int32_t vi5x0 = (int32_t) i5[0]; const int32_t vi5x1 = (int32_t) i5[1]; i5 += 2; const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11]; vacc0 += vi5x0 * vk5x0; vacc1 += vi5x1 * vk5x1; const int32_t vi6x0 = (int32_t) i6[0]; const int32_t vi6x1 = (int32_t) i6[1]; i6 += 2; const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13]; vacc0 += vi6x0 * vk6x0; vacc1 += vi6x1 * vk6x1; const int32_t vi7x0 = (int32_t) i7[0]; const int32_t vi7x1 = (int32_t) i7[1]; i7 += 2; const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15]; vacc0 += vi7x0 * vk7x0; vacc1 += vi7x1 * vk7x1; const int32_t vi8x0 = (int32_t) i8[0]; const int32_t vi8x1 = (int32_t) i8[1]; i8 += 2; const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17]; vacc0 += vi8x0 * vk8x0; vacc1 += vi8x1 * vk8x1; const int32_t vi9x0 = (int32_t) i9[0]; const int32_t vi9x1 = (int32_t) i9[1]; i9 += 2; const int32_t vk9x0 = (int32_t) ((const int8_t*) 
((uintptr_t) w + 2 * sizeof(int32_t)))[18]; const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19]; vacc0 += vi9x0 * vk9x0; vacc1 += vi9x1 * vk9x1; const int32_t vi10x0 = (int32_t) i10[0]; const int32_t vi10x1 = (int32_t) i10[1]; i10 += 2; const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21]; vacc0 += vi10x0 * vk10x0; vacc1 += vi10x1 * vk10x1; const int32_t vi11x0 = (int32_t) i11[0]; const int32_t vi11x1 = (int32_t) i11[1]; i11 += 2; const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23]; vacc0 += vi11x0 * vk11x0; vacc1 += vi11x1 * vk11x1; const int32_t vi12x0 = (int32_t) i12[0]; const int32_t vi12x1 = (int32_t) i12[1]; i12 += 2; const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25]; vacc0 += vi12x0 * vk12x0; vacc1 += vi12x1 * vk12x1; const int32_t vi13x0 = (int32_t) i13[0]; const int32_t vi13x1 = (int32_t) i13[1]; i13 += 2; const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27]; vacc0 += vi13x0 * vk13x0; vacc1 += vi13x1 * vk13x1; const int32_t vi14x0 = (int32_t) i14[0]; const int32_t vi14x1 = (int32_t) i14[1]; i14 += 2; const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29]; vacc0 += vi14x0 * vk14x0; vacc1 += vi14x1 * vk14x1; const int32_t vi15x0 = (int32_t) i15[0]; const int32_t vi15x1 = (int32_t) i15[1]; i15 += 2; const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) 
w + 2 * sizeof(int32_t)))[30]; const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31]; vacc0 += vi15x0 * vk15x0; vacc1 += vi15x1 * vk15x1; const int32_t vi16x0 = (int32_t) i16[0]; const int32_t vi16x1 = (int32_t) i16[1]; i16 += 2; const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33]; vacc0 += vi16x0 * vk16x0; vacc1 += vi16x1 * vk16x1; const int32_t vi17x0 = (int32_t) i17[0]; const int32_t vi17x1 = (int32_t) i17[1]; i17 += 2; const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35]; vacc0 += vi17x0 * vk17x0; vacc1 += vi17x1 * vk17x1; const int32_t vi18x0 = (int32_t) i18[0]; const int32_t vi18x1 = (int32_t) i18[1]; i18 += 2; const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37]; vacc0 += vi18x0 * vk18x0; vacc1 += vi18x1 * vk18x1; const int32_t vi19x0 = (int32_t) i19[0]; const int32_t vi19x1 = (int32_t) i19[1]; i19 += 2; const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39]; vacc0 += vi19x0 * vk19x0; vacc1 += vi19x1 * vk19x1; const int32_t vi20x0 = (int32_t) i20[0]; const int32_t vi20x1 = (int32_t) i20[1]; i20 += 2; const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41]; vacc0 += vi20x0 * vk20x0; vacc1 += vi20x1 * vk20x1; const int32_t vi21x0 = (int32_t) i21[0]; const int32_t vi21x1 = (int32_t) i21[1]; i21 += 2; const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * 
sizeof(int32_t)))[42]; const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43]; vacc0 += vi21x0 * vk21x0; vacc1 += vi21x1 * vk21x1; const int32_t vi22x0 = (int32_t) i22[0]; const int32_t vi22x1 = (int32_t) i22[1]; i22 += 2; const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45]; vacc0 += vi22x0 * vk22x0; vacc1 += vi22x1 * vk22x1; const int32_t vi23x0 = (int32_t) i23[0]; const int32_t vi23x1 = (int32_t) i23[1]; i23 += 2; const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47]; vacc0 += vi23x0 * vk23x0; vacc1 += vi23x1 * vk23x1; const int32_t vi24x0 = (int32_t) i24[0]; const int32_t vi24x1 = (int32_t) i24[1]; i24 += 2; const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49]; vacc0 += vi24x0 * vk24x0; vacc1 += vi24x1 * vk24x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point); vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point); vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point); vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point); vfpacc0 += vmagic_bias; vfpacc1 += vmagic_bias; int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point; int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - vmagic_bias_less_output_zero_point; output[0] = (int8_t) 
vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const int32_t vi3 = (int32_t) *i3; const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; vacc += vi3 * vk3; const int32_t vi4 = (int32_t) *i4; const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; vacc += vi4 * vk4; const int32_t vi5 = (int32_t) *i5; const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; vacc += vi5 * vk5; const int32_t vi6 = (int32_t) *i6; const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; vacc += vi6 * vk6; const int32_t vi7 = (int32_t) *i7; const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; vacc += vi7 * vk7; const int32_t vi8 = (int32_t) *i8; const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; vacc += vi8 * vk8; const int32_t vi9 = (int32_t) *i9; const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; vacc += vi9 * vk9; const int32_t vi10 = (int32_t) *i10; const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; vacc += vi10 * vk10; const int32_t vi11 = (int32_t) *i11; const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; vacc += vi11 * vk11; const int32_t vi12 = (int32_t) *i12; const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; vacc += 
vi12 * vk12; const int32_t vi13 = (int32_t) *i13; const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; vacc += vi13 * vk13; const int32_t vi14 = (int32_t) *i14; const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; vacc += vi14 * vk14; const int32_t vi15 = (int32_t) *i15; const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; vacc += vi15 * vk15; const int32_t vi16 = (int32_t) *i16; const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; vacc += vi16 * vk16; const int32_t vi17 = (int32_t) *i17; const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; vacc += vi17 * vk17; const int32_t vi18 = (int32_t) *i18; const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; vacc += vi18 * vk18; const int32_t vi19 = (int32_t) *i19; const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; vacc += vi19 * vk19; const int32_t vi20 = (int32_t) *i20; const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; vacc += vi20 * vk20; const int32_t vi21 = (int32_t) *i21; const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; vacc += vi21 * vk21; const int32_t vi22 = (int32_t) *i22; const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; vacc += vi22 * vk22; const int32_t vi23 = (int32_t) *i23; const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; vacc += vi23 * vk23; const int32_t vi24 = (int32_t) *i24; const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; vacc += vi24 * vk24; const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t))); float vfpacc = (float) vacc * vscale; vfpacc = math_max_f32(vfpacc, 
voutput_min_less_zero_point); vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point); vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20,706
37.204797
121
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p2c-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p2c__scalar_imagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } const int8_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const int8_t*) ((uintptr_t) i3 + input_offset); } const int8_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const int8_t*) ((uintptr_t) i4 + input_offset); } const int8_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const int8_t*) ((uintptr_t) i5 + input_offset); } const int8_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = 
(const int8_t*) ((uintptr_t) i6 + input_offset); } const int8_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const int8_t*) ((uintptr_t) i7 + input_offset); } const int8_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const int8_t*) ((uintptr_t) i8 + input_offset); } const int8_t* i9 = input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != zero) { i9 = (const int8_t*) ((uintptr_t) i9 + input_offset); } const int8_t* i10 = input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != zero) { i10 = (const int8_t*) ((uintptr_t) i10 + input_offset); } const int8_t* i11 = input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != zero) { i11 = (const int8_t*) ((uintptr_t) i11 + input_offset); } const int8_t* i12 = input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != zero) { i12 = (const int8_t*) ((uintptr_t) i12 + input_offset); } const int8_t* i13 = input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != zero) { i13 = (const int8_t*) ((uintptr_t) i13 + input_offset); } const int8_t* i14 = input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != zero) { i14 = (const int8_t*) ((uintptr_t) i14 + input_offset); } const int8_t* i15 = input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != zero) { i15 = (const int8_t*) ((uintptr_t) i15 + input_offset); } const int8_t* i16 = input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != zero) { i16 = (const int8_t*) ((uintptr_t) i16 + input_offset); } const int8_t* i17 = input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != zero) { i17 = (const int8_t*) ((uintptr_t) i17 + input_offset); } const int8_t* i18 = input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != zero) { i18 = (const int8_t*) ((uintptr_t) i18 + input_offset); } const int8_t* i19 = input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != zero) { i19 = (const int8_t*) ((uintptr_t) i19 + input_offset); } const int8_t* i20 = input[20]; assert(i20 != NULL); if 
XNN_UNPREDICTABLE(i20 != zero) { i20 = (const int8_t*) ((uintptr_t) i20 + input_offset); } const int8_t* i21 = input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != zero) { i21 = (const int8_t*) ((uintptr_t) i21 + input_offset); } const int8_t* i22 = input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != zero) { i22 = (const int8_t*) ((uintptr_t) i22 + input_offset); } const int8_t* i23 = input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != zero) { i23 = (const int8_t*) ((uintptr_t) i23 + input_offset); } const int8_t* i24 = input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != zero) { i24 = (const int8_t*) ((uintptr_t) i24 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; const int32_t vi3x0 = (int32_t) i3[0]; const int32_t vi3x1 = (int32_t) i3[1]; i3 += 2; const int32_t vk3x0 = (int32_t) ((const int8_t*) 
((uintptr_t) w + 2 * sizeof(int32_t)))[6]; const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7]; vacc0 += vi3x0 * vk3x0; vacc1 += vi3x1 * vk3x1; const int32_t vi4x0 = (int32_t) i4[0]; const int32_t vi4x1 = (int32_t) i4[1]; i4 += 2; const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9]; vacc0 += vi4x0 * vk4x0; vacc1 += vi4x1 * vk4x1; const int32_t vi5x0 = (int32_t) i5[0]; const int32_t vi5x1 = (int32_t) i5[1]; i5 += 2; const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11]; vacc0 += vi5x0 * vk5x0; vacc1 += vi5x1 * vk5x1; const int32_t vi6x0 = (int32_t) i6[0]; const int32_t vi6x1 = (int32_t) i6[1]; i6 += 2; const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13]; vacc0 += vi6x0 * vk6x0; vacc1 += vi6x1 * vk6x1; const int32_t vi7x0 = (int32_t) i7[0]; const int32_t vi7x1 = (int32_t) i7[1]; i7 += 2; const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15]; vacc0 += vi7x0 * vk7x0; vacc1 += vi7x1 * vk7x1; const int32_t vi8x0 = (int32_t) i8[0]; const int32_t vi8x1 = (int32_t) i8[1]; i8 += 2; const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17]; vacc0 += vi8x0 * vk8x0; vacc1 += vi8x1 * vk8x1; const int32_t vi9x0 = (int32_t) i9[0]; const int32_t vi9x1 = (int32_t) i9[1]; i9 += 2; const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; const int32_t vk9x1 = (int32_t) 
((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19]; vacc0 += vi9x0 * vk9x0; vacc1 += vi9x1 * vk9x1; const int32_t vi10x0 = (int32_t) i10[0]; const int32_t vi10x1 = (int32_t) i10[1]; i10 += 2; const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21]; vacc0 += vi10x0 * vk10x0; vacc1 += vi10x1 * vk10x1; const int32_t vi11x0 = (int32_t) i11[0]; const int32_t vi11x1 = (int32_t) i11[1]; i11 += 2; const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23]; vacc0 += vi11x0 * vk11x0; vacc1 += vi11x1 * vk11x1; const int32_t vi12x0 = (int32_t) i12[0]; const int32_t vi12x1 = (int32_t) i12[1]; i12 += 2; const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25]; vacc0 += vi12x0 * vk12x0; vacc1 += vi12x1 * vk12x1; const int32_t vi13x0 = (int32_t) i13[0]; const int32_t vi13x1 = (int32_t) i13[1]; i13 += 2; const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27]; vacc0 += vi13x0 * vk13x0; vacc1 += vi13x1 * vk13x1; const int32_t vi14x0 = (int32_t) i14[0]; const int32_t vi14x1 = (int32_t) i14[1]; i14 += 2; const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29]; vacc0 += vi14x0 * vk14x0; vacc1 += vi14x1 * vk14x1; const int32_t vi15x0 = (int32_t) i15[0]; const int32_t vi15x1 = (int32_t) i15[1]; i15 += 2; const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; const int32_t vk15x1 = (int32_t) ((const 
int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31]; vacc0 += vi15x0 * vk15x0; vacc1 += vi15x1 * vk15x1; const int32_t vi16x0 = (int32_t) i16[0]; const int32_t vi16x1 = (int32_t) i16[1]; i16 += 2; const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33]; vacc0 += vi16x0 * vk16x0; vacc1 += vi16x1 * vk16x1; const int32_t vi17x0 = (int32_t) i17[0]; const int32_t vi17x1 = (int32_t) i17[1]; i17 += 2; const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35]; vacc0 += vi17x0 * vk17x0; vacc1 += vi17x1 * vk17x1; const int32_t vi18x0 = (int32_t) i18[0]; const int32_t vi18x1 = (int32_t) i18[1]; i18 += 2; const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37]; vacc0 += vi18x0 * vk18x0; vacc1 += vi18x1 * vk18x1; const int32_t vi19x0 = (int32_t) i19[0]; const int32_t vi19x1 = (int32_t) i19[1]; i19 += 2; const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39]; vacc0 += vi19x0 * vk19x0; vacc1 += vi19x1 * vk19x1; const int32_t vi20x0 = (int32_t) i20[0]; const int32_t vi20x1 = (int32_t) i20[1]; i20 += 2; const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41]; vacc0 += vi20x0 * vk20x0; vacc1 += vi20x1 * vk20x1; const int32_t vi21x0 = (int32_t) i21[0]; const int32_t vi21x1 = (int32_t) i21[1]; i21 += 2; const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; const int32_t vk21x1 = (int32_t) ((const 
int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43]; vacc0 += vi21x0 * vk21x0; vacc1 += vi21x1 * vk21x1; const int32_t vi22x0 = (int32_t) i22[0]; const int32_t vi22x1 = (int32_t) i22[1]; i22 += 2; const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45]; vacc0 += vi22x0 * vk22x0; vacc1 += vi22x1 * vk22x1; const int32_t vi23x0 = (int32_t) i23[0]; const int32_t vi23x1 = (int32_t) i23[1]; i23 += 2; const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47]; vacc0 += vi23x0 * vk23x0; vacc1 += vi23x1 * vk23x1; const int32_t vi24x0 = (int32_t) i24[0]; const int32_t vi24x1 = (int32_t) i24[1]; i24 += 2; const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49]; vacc0 += vi24x0 * vk24x0; vacc1 += vi24x1 * vk24x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 += vmagic_bias; vfpacc1 += vmagic_bias; int32_t vout0 = (int32_t) float_as_uint32(vfpacc0); int32_t vout1 = (int32_t) float_as_uint32(vfpacc1); vout0 = math_max_s32(vout0, vmagic_min); vout1 = math_max_s32(vout1, vmagic_min); vout0 = math_min_s32(vout0, vmagic_max); vout1 = math_min_s32(vout1, vmagic_max); vout0 -= vmagic_bias_less_zero_point; vout1 -= vmagic_bias_less_zero_point; output[0] = (int8_t) vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; 
const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const int32_t vi3 = (int32_t) *i3; const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; vacc += vi3 * vk3; const int32_t vi4 = (int32_t) *i4; const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; vacc += vi4 * vk4; const int32_t vi5 = (int32_t) *i5; const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; vacc += vi5 * vk5; const int32_t vi6 = (int32_t) *i6; const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; vacc += vi6 * vk6; const int32_t vi7 = (int32_t) *i7; const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; vacc += vi7 * vk7; const int32_t vi8 = (int32_t) *i8; const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; vacc += vi8 * vk8; const int32_t vi9 = (int32_t) *i9; const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; vacc += vi9 * vk9; const int32_t vi10 = (int32_t) *i10; const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; vacc += vi10 * vk10; const int32_t vi11 = (int32_t) *i11; const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; vacc += vi11 * vk11; const int32_t vi12 = (int32_t) *i12; const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; vacc += vi12 * vk12; const int32_t vi13 = (int32_t) *i13; const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; vacc += 
vi13 * vk13; const int32_t vi14 = (int32_t) *i14; const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; vacc += vi14 * vk14; const int32_t vi15 = (int32_t) *i15; const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; vacc += vi15 * vk15; const int32_t vi16 = (int32_t) *i16; const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; vacc += vi16 * vk16; const int32_t vi17 = (int32_t) *i17; const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; vacc += vi17 * vk17; const int32_t vi18 = (int32_t) *i18; const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; vacc += vi18 * vk18; const int32_t vi19 = (int32_t) *i19; const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; vacc += vi19 * vk19; const int32_t vi20 = (int32_t) *i20; const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; vacc += vi20 * vk20; const int32_t vi21 = (int32_t) *i21; const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; vacc += vi21 * vk21; const int32_t vi22 = (int32_t) *i22; const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; vacc += vi22 * vk22; const int32_t vi23 = (int32_t) *i23; const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; vacc += vi23 * vk23; const int32_t vi24 = (int32_t) *i24; const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; vacc += vi24 * vk24; const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t))); float vfpacc = (float) vacc * vscale; vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc); vout = math_max_s32(vout, vmagic_min); vout = math_min_s32(vout, vmagic_max); vout -= 
vmagic_bias_less_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20,523
36.589744
121
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p2c-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p2c__scalar_lrintf( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } const int8_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const int8_t*) ((uintptr_t) i3 + input_offset); } const int8_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const int8_t*) ((uintptr_t) i4 + input_offset); } const int8_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const int8_t*) ((uintptr_t) i5 + input_offset); } const int8_t* i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = 
(const int8_t*) ((uintptr_t) i6 + input_offset); } const int8_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const int8_t*) ((uintptr_t) i7 + input_offset); } const int8_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const int8_t*) ((uintptr_t) i8 + input_offset); } const int8_t* i9 = input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != zero) { i9 = (const int8_t*) ((uintptr_t) i9 + input_offset); } const int8_t* i10 = input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != zero) { i10 = (const int8_t*) ((uintptr_t) i10 + input_offset); } const int8_t* i11 = input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != zero) { i11 = (const int8_t*) ((uintptr_t) i11 + input_offset); } const int8_t* i12 = input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != zero) { i12 = (const int8_t*) ((uintptr_t) i12 + input_offset); } const int8_t* i13 = input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != zero) { i13 = (const int8_t*) ((uintptr_t) i13 + input_offset); } const int8_t* i14 = input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != zero) { i14 = (const int8_t*) ((uintptr_t) i14 + input_offset); } const int8_t* i15 = input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != zero) { i15 = (const int8_t*) ((uintptr_t) i15 + input_offset); } const int8_t* i16 = input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != zero) { i16 = (const int8_t*) ((uintptr_t) i16 + input_offset); } const int8_t* i17 = input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != zero) { i17 = (const int8_t*) ((uintptr_t) i17 + input_offset); } const int8_t* i18 = input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != zero) { i18 = (const int8_t*) ((uintptr_t) i18 + input_offset); } const int8_t* i19 = input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != zero) { i19 = (const int8_t*) ((uintptr_t) i19 + input_offset); } const int8_t* i20 = input[20]; assert(i20 != NULL); if 
XNN_UNPREDICTABLE(i20 != zero) { i20 = (const int8_t*) ((uintptr_t) i20 + input_offset); } const int8_t* i21 = input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != zero) { i21 = (const int8_t*) ((uintptr_t) i21 + input_offset); } const int8_t* i22 = input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != zero) { i22 = (const int8_t*) ((uintptr_t) i22 + input_offset); } const int8_t* i23 = input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != zero) { i23 = (const int8_t*) ((uintptr_t) i23 + input_offset); } const int8_t* i24 = input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != zero) { i24 = (const int8_t*) ((uintptr_t) i24 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; const int32_t vi3x0 = (int32_t) i3[0]; const int32_t vi3x1 = (int32_t) i3[1]; i3 += 2; const int32_t vk3x0 = (int32_t) ((const int8_t*) 
((uintptr_t) w + 2 * sizeof(int32_t)))[6]; const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7]; vacc0 += vi3x0 * vk3x0; vacc1 += vi3x1 * vk3x1; const int32_t vi4x0 = (int32_t) i4[0]; const int32_t vi4x1 = (int32_t) i4[1]; i4 += 2; const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9]; vacc0 += vi4x0 * vk4x0; vacc1 += vi4x1 * vk4x1; const int32_t vi5x0 = (int32_t) i5[0]; const int32_t vi5x1 = (int32_t) i5[1]; i5 += 2; const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11]; vacc0 += vi5x0 * vk5x0; vacc1 += vi5x1 * vk5x1; const int32_t vi6x0 = (int32_t) i6[0]; const int32_t vi6x1 = (int32_t) i6[1]; i6 += 2; const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13]; vacc0 += vi6x0 * vk6x0; vacc1 += vi6x1 * vk6x1; const int32_t vi7x0 = (int32_t) i7[0]; const int32_t vi7x1 = (int32_t) i7[1]; i7 += 2; const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15]; vacc0 += vi7x0 * vk7x0; vacc1 += vi7x1 * vk7x1; const int32_t vi8x0 = (int32_t) i8[0]; const int32_t vi8x1 = (int32_t) i8[1]; i8 += 2; const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17]; vacc0 += vi8x0 * vk8x0; vacc1 += vi8x1 * vk8x1; const int32_t vi9x0 = (int32_t) i9[0]; const int32_t vi9x1 = (int32_t) i9[1]; i9 += 2; const int32_t vk9x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; const int32_t vk9x1 = (int32_t) 
((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19]; vacc0 += vi9x0 * vk9x0; vacc1 += vi9x1 * vk9x1; const int32_t vi10x0 = (int32_t) i10[0]; const int32_t vi10x1 = (int32_t) i10[1]; i10 += 2; const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21]; vacc0 += vi10x0 * vk10x0; vacc1 += vi10x1 * vk10x1; const int32_t vi11x0 = (int32_t) i11[0]; const int32_t vi11x1 = (int32_t) i11[1]; i11 += 2; const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23]; vacc0 += vi11x0 * vk11x0; vacc1 += vi11x1 * vk11x1; const int32_t vi12x0 = (int32_t) i12[0]; const int32_t vi12x1 = (int32_t) i12[1]; i12 += 2; const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25]; vacc0 += vi12x0 * vk12x0; vacc1 += vi12x1 * vk12x1; const int32_t vi13x0 = (int32_t) i13[0]; const int32_t vi13x1 = (int32_t) i13[1]; i13 += 2; const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27]; vacc0 += vi13x0 * vk13x0; vacc1 += vi13x1 * vk13x1; const int32_t vi14x0 = (int32_t) i14[0]; const int32_t vi14x1 = (int32_t) i14[1]; i14 += 2; const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29]; vacc0 += vi14x0 * vk14x0; vacc1 += vi14x1 * vk14x1; const int32_t vi15x0 = (int32_t) i15[0]; const int32_t vi15x1 = (int32_t) i15[1]; i15 += 2; const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; const int32_t vk15x1 = (int32_t) ((const 
int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31]; vacc0 += vi15x0 * vk15x0; vacc1 += vi15x1 * vk15x1; const int32_t vi16x0 = (int32_t) i16[0]; const int32_t vi16x1 = (int32_t) i16[1]; i16 += 2; const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33]; vacc0 += vi16x0 * vk16x0; vacc1 += vi16x1 * vk16x1; const int32_t vi17x0 = (int32_t) i17[0]; const int32_t vi17x1 = (int32_t) i17[1]; i17 += 2; const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35]; vacc0 += vi17x0 * vk17x0; vacc1 += vi17x1 * vk17x1; const int32_t vi18x0 = (int32_t) i18[0]; const int32_t vi18x1 = (int32_t) i18[1]; i18 += 2; const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37]; vacc0 += vi18x0 * vk18x0; vacc1 += vi18x1 * vk18x1; const int32_t vi19x0 = (int32_t) i19[0]; const int32_t vi19x1 = (int32_t) i19[1]; i19 += 2; const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39]; vacc0 += vi19x0 * vk19x0; vacc1 += vi19x1 * vk19x1; const int32_t vi20x0 = (int32_t) i20[0]; const int32_t vi20x1 = (int32_t) i20[1]; i20 += 2; const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41]; vacc0 += vi20x0 * vk20x0; vacc1 += vi20x1 * vk20x1; const int32_t vi21x0 = (int32_t) i21[0]; const int32_t vi21x1 = (int32_t) i21[1]; i21 += 2; const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; const int32_t vk21x1 = (int32_t) ((const 
int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43]; vacc0 += vi21x0 * vk21x0; vacc1 += vi21x1 * vk21x1; const int32_t vi22x0 = (int32_t) i22[0]; const int32_t vi22x1 = (int32_t) i22[1]; i22 += 2; const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45]; vacc0 += vi22x0 * vk22x0; vacc1 += vi22x1 * vk22x1; const int32_t vi23x0 = (int32_t) i23[0]; const int32_t vi23x1 = (int32_t) i23[1]; i23 += 2; const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47]; vacc0 += vi23x0 * vk23x0; vacc1 += vi23x1 * vk23x1; const int32_t vi24x0 = (int32_t) i24[0]; const int32_t vi24x1 = (int32_t) i24[1]; i24 += 2; const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49]; vacc0 += vi24x0 * vk24x0; vacc1 += vi24x1 * vk24x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point); vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point); vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point); vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point); const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0); const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1); int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point; int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point; output[0] = (int8_t) vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { 
int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const int32_t vi3 = (int32_t) *i3; const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; vacc += vi3 * vk3; const int32_t vi4 = (int32_t) *i4; const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; vacc += vi4 * vk4; const int32_t vi5 = (int32_t) *i5; const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; vacc += vi5 * vk5; const int32_t vi6 = (int32_t) *i6; const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; vacc += vi6 * vk6; const int32_t vi7 = (int32_t) *i7; const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; vacc += vi7 * vk7; const int32_t vi8 = (int32_t) *i8; const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; vacc += vi8 * vk8; const int32_t vi9 = (int32_t) *i9; const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; vacc += vi9 * vk9; const int32_t vi10 = (int32_t) *i10; const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; vacc += vi10 * vk10; const int32_t vi11 = (int32_t) *i11; const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; vacc += vi11 * vk11; const int32_t vi12 = (int32_t) *i12; const int32_t vk12 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; vacc += vi12 * vk12; const int32_t vi13 = (int32_t) *i13; const int32_t vk13 = 
(int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; vacc += vi13 * vk13; const int32_t vi14 = (int32_t) *i14; const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; vacc += vi14 * vk14; const int32_t vi15 = (int32_t) *i15; const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; vacc += vi15 * vk15; const int32_t vi16 = (int32_t) *i16; const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; vacc += vi16 * vk16; const int32_t vi17 = (int32_t) *i17; const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; vacc += vi17 * vk17; const int32_t vi18 = (int32_t) *i18; const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; vacc += vi18 * vk18; const int32_t vi19 = (int32_t) *i19; const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; vacc += vi19 * vk19; const int32_t vi20 = (int32_t) *i20; const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; vacc += vi20 * vk20; const int32_t vi21 = (int32_t) *i21; const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; vacc += vi21 * vk21; const int32_t vi22 = (int32_t) *i22; const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; vacc += vi22 * vk22; const int32_t vi23 = (int32_t) *i23; const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; vacc += vi23 * vk23; const int32_t vi24 = (int32_t) *i24; const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; vacc += vi24 * vk24; const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t))); float vfpacc = (float) vacc * vscale; vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point); vfpacc = math_min_f32(vfpacc, 
voutput_max_less_zero_point); const int32_t vrndacc = (int32_t) lrintf(vfpacc); int32_t vout = vrndacc + voutput_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20,602
37.012915
121
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-25p2c-minmax-fp32-wasm-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_25p2c__wasm_fmagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } const int8_t* i3 = input[3]; assert(i3 != NULL); if XNN_UNPREDICTABLE(i3 != zero) { i3 = (const int8_t*) ((uintptr_t) i3 + input_offset); } const int8_t* i4 = input[4]; assert(i4 != NULL); if XNN_UNPREDICTABLE(i4 != zero) { i4 = (const int8_t*) ((uintptr_t) i4 + input_offset); } const int8_t* i5 = input[5]; assert(i5 != NULL); if XNN_UNPREDICTABLE(i5 != zero) { i5 = (const int8_t*) ((uintptr_t) i5 + input_offset); } const int8_t* 
i6 = input[6]; assert(i6 != NULL); if XNN_UNPREDICTABLE(i6 != zero) { i6 = (const int8_t*) ((uintptr_t) i6 + input_offset); } const int8_t* i7 = input[7]; assert(i7 != NULL); if XNN_UNPREDICTABLE(i7 != zero) { i7 = (const int8_t*) ((uintptr_t) i7 + input_offset); } const int8_t* i8 = input[8]; assert(i8 != NULL); if XNN_UNPREDICTABLE(i8 != zero) { i8 = (const int8_t*) ((uintptr_t) i8 + input_offset); } const int8_t* i9 = input[9]; assert(i9 != NULL); if XNN_UNPREDICTABLE(i9 != zero) { i9 = (const int8_t*) ((uintptr_t) i9 + input_offset); } const int8_t* i10 = input[10]; assert(i10 != NULL); if XNN_UNPREDICTABLE(i10 != zero) { i10 = (const int8_t*) ((uintptr_t) i10 + input_offset); } const int8_t* i11 = input[11]; assert(i11 != NULL); if XNN_UNPREDICTABLE(i11 != zero) { i11 = (const int8_t*) ((uintptr_t) i11 + input_offset); } const int8_t* i12 = input[12]; assert(i12 != NULL); if XNN_UNPREDICTABLE(i12 != zero) { i12 = (const int8_t*) ((uintptr_t) i12 + input_offset); } const int8_t* i13 = input[13]; assert(i13 != NULL); if XNN_UNPREDICTABLE(i13 != zero) { i13 = (const int8_t*) ((uintptr_t) i13 + input_offset); } const int8_t* i14 = input[14]; assert(i14 != NULL); if XNN_UNPREDICTABLE(i14 != zero) { i14 = (const int8_t*) ((uintptr_t) i14 + input_offset); } const int8_t* i15 = input[15]; assert(i15 != NULL); if XNN_UNPREDICTABLE(i15 != zero) { i15 = (const int8_t*) ((uintptr_t) i15 + input_offset); } const int8_t* i16 = input[16]; assert(i16 != NULL); if XNN_UNPREDICTABLE(i16 != zero) { i16 = (const int8_t*) ((uintptr_t) i16 + input_offset); } const int8_t* i17 = input[17]; assert(i17 != NULL); if XNN_UNPREDICTABLE(i17 != zero) { i17 = (const int8_t*) ((uintptr_t) i17 + input_offset); } const int8_t* i18 = input[18]; assert(i18 != NULL); if XNN_UNPREDICTABLE(i18 != zero) { i18 = (const int8_t*) ((uintptr_t) i18 + input_offset); } const int8_t* i19 = input[19]; assert(i19 != NULL); if XNN_UNPREDICTABLE(i19 != zero) { i19 = (const int8_t*) ((uintptr_t) i19 + 
input_offset); } const int8_t* i20 = input[20]; assert(i20 != NULL); if XNN_UNPREDICTABLE(i20 != zero) { i20 = (const int8_t*) ((uintptr_t) i20 + input_offset); } const int8_t* i21 = input[21]; assert(i21 != NULL); if XNN_UNPREDICTABLE(i21 != zero) { i21 = (const int8_t*) ((uintptr_t) i21 + input_offset); } const int8_t* i22 = input[22]; assert(i22 != NULL); if XNN_UNPREDICTABLE(i22 != zero) { i22 = (const int8_t*) ((uintptr_t) i22 + input_offset); } const int8_t* i23 = input[23]; assert(i23 != NULL); if XNN_UNPREDICTABLE(i23 != zero) { i23 = (const int8_t*) ((uintptr_t) i23 + input_offset); } const int8_t* i24 = input[24]; assert(i24 != NULL); if XNN_UNPREDICTABLE(i24 != zero) { i24 = (const int8_t*) ((uintptr_t) i24 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; const int32_t vi3x0 = (int32_t) i3[0]; const int32_t vi3x1 = 
(int32_t) i3[1]; i3 += 2; const int32_t vk3x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; const int32_t vk3x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[7]; vacc0 += vi3x0 * vk3x0; vacc1 += vi3x1 * vk3x1; const int32_t vi4x0 = (int32_t) i4[0]; const int32_t vi4x1 = (int32_t) i4[1]; i4 += 2; const int32_t vk4x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; const int32_t vk4x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[9]; vacc0 += vi4x0 * vk4x0; vacc1 += vi4x1 * vk4x1; const int32_t vi5x0 = (int32_t) i5[0]; const int32_t vi5x1 = (int32_t) i5[1]; i5 += 2; const int32_t vk5x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; const int32_t vk5x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[11]; vacc0 += vi5x0 * vk5x0; vacc1 += vi5x1 * vk5x1; const int32_t vi6x0 = (int32_t) i6[0]; const int32_t vi6x1 = (int32_t) i6[1]; i6 += 2; const int32_t vk6x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; const int32_t vk6x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[13]; vacc0 += vi6x0 * vk6x0; vacc1 += vi6x1 * vk6x1; const int32_t vi7x0 = (int32_t) i7[0]; const int32_t vi7x1 = (int32_t) i7[1]; i7 += 2; const int32_t vk7x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; const int32_t vk7x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[15]; vacc0 += vi7x0 * vk7x0; vacc1 += vi7x1 * vk7x1; const int32_t vi8x0 = (int32_t) i8[0]; const int32_t vi8x1 = (int32_t) i8[1]; i8 += 2; const int32_t vk8x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; const int32_t vk8x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[17]; vacc0 += vi8x0 * vk8x0; vacc1 += vi8x1 * vk8x1; const int32_t vi9x0 = (int32_t) i9[0]; const int32_t vi9x1 = (int32_t) i9[1]; i9 += 2; const int32_t vk9x0 = (int32_t) ((const int8_t*) 
((uintptr_t) w + 2 * sizeof(int32_t)))[18]; const int32_t vk9x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[19]; vacc0 += vi9x0 * vk9x0; vacc1 += vi9x1 * vk9x1; const int32_t vi10x0 = (int32_t) i10[0]; const int32_t vi10x1 = (int32_t) i10[1]; i10 += 2; const int32_t vk10x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; const int32_t vk10x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[21]; vacc0 += vi10x0 * vk10x0; vacc1 += vi10x1 * vk10x1; const int32_t vi11x0 = (int32_t) i11[0]; const int32_t vi11x1 = (int32_t) i11[1]; i11 += 2; const int32_t vk11x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; const int32_t vk11x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[23]; vacc0 += vi11x0 * vk11x0; vacc1 += vi11x1 * vk11x1; const int32_t vi12x0 = (int32_t) i12[0]; const int32_t vi12x1 = (int32_t) i12[1]; i12 += 2; const int32_t vk12x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; const int32_t vk12x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[25]; vacc0 += vi12x0 * vk12x0; vacc1 += vi12x1 * vk12x1; const int32_t vi13x0 = (int32_t) i13[0]; const int32_t vi13x1 = (int32_t) i13[1]; i13 += 2; const int32_t vk13x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; const int32_t vk13x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[27]; vacc0 += vi13x0 * vk13x0; vacc1 += vi13x1 * vk13x1; const int32_t vi14x0 = (int32_t) i14[0]; const int32_t vi14x1 = (int32_t) i14[1]; i14 += 2; const int32_t vk14x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; const int32_t vk14x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[29]; vacc0 += vi14x0 * vk14x0; vacc1 += vi14x1 * vk14x1; const int32_t vi15x0 = (int32_t) i15[0]; const int32_t vi15x1 = (int32_t) i15[1]; i15 += 2; const int32_t vk15x0 = (int32_t) ((const int8_t*) ((uintptr_t) 
w + 2 * sizeof(int32_t)))[30]; const int32_t vk15x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[31]; vacc0 += vi15x0 * vk15x0; vacc1 += vi15x1 * vk15x1; const int32_t vi16x0 = (int32_t) i16[0]; const int32_t vi16x1 = (int32_t) i16[1]; i16 += 2; const int32_t vk16x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; const int32_t vk16x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[33]; vacc0 += vi16x0 * vk16x0; vacc1 += vi16x1 * vk16x1; const int32_t vi17x0 = (int32_t) i17[0]; const int32_t vi17x1 = (int32_t) i17[1]; i17 += 2; const int32_t vk17x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; const int32_t vk17x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[35]; vacc0 += vi17x0 * vk17x0; vacc1 += vi17x1 * vk17x1; const int32_t vi18x0 = (int32_t) i18[0]; const int32_t vi18x1 = (int32_t) i18[1]; i18 += 2; const int32_t vk18x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; const int32_t vk18x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[37]; vacc0 += vi18x0 * vk18x0; vacc1 += vi18x1 * vk18x1; const int32_t vi19x0 = (int32_t) i19[0]; const int32_t vi19x1 = (int32_t) i19[1]; i19 += 2; const int32_t vk19x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; const int32_t vk19x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[39]; vacc0 += vi19x0 * vk19x0; vacc1 += vi19x1 * vk19x1; const int32_t vi20x0 = (int32_t) i20[0]; const int32_t vi20x1 = (int32_t) i20[1]; i20 += 2; const int32_t vk20x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; const int32_t vk20x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[41]; vacc0 += vi20x0 * vk20x0; vacc1 += vi20x1 * vk20x1; const int32_t vi21x0 = (int32_t) i21[0]; const int32_t vi21x1 = (int32_t) i21[1]; i21 += 2; const int32_t vk21x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * 
sizeof(int32_t)))[42]; const int32_t vk21x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[43]; vacc0 += vi21x0 * vk21x0; vacc1 += vi21x1 * vk21x1; const int32_t vi22x0 = (int32_t) i22[0]; const int32_t vi22x1 = (int32_t) i22[1]; i22 += 2; const int32_t vk22x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; const int32_t vk22x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[45]; vacc0 += vi22x0 * vk22x0; vacc1 += vi22x1 * vk22x1; const int32_t vi23x0 = (int32_t) i23[0]; const int32_t vi23x1 = (int32_t) i23[1]; i23 += 2; const int32_t vk23x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; const int32_t vk23x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[47]; vacc0 += vi23x0 * vk23x0; vacc1 += vi23x1 * vk23x1; const int32_t vi24x0 = (int32_t) i24[0]; const int32_t vi24x1 = (int32_t) i24[1]; i24 += 2; const int32_t vk24x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; const int32_t vk24x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[49]; vacc0 += vi24x0 * vk24x0; vacc1 += vi24x1 * vk24x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 = __builtin_wasm_max_f32(vfpacc0, voutput_min_less_zero_point); vfpacc1 = __builtin_wasm_max_f32(vfpacc1, voutput_min_less_zero_point); vfpacc0 = __builtin_wasm_min_f32(vfpacc0, voutput_max_less_zero_point); vfpacc1 = __builtin_wasm_min_f32(vfpacc1, voutput_max_less_zero_point); vfpacc0 += vmagic_bias; vfpacc1 += vmagic_bias; int32_t vout0 = (int32_t) float_as_uint32(vfpacc0) - vmagic_bias_less_output_zero_point; int32_t vout1 = (int32_t) float_as_uint32(vfpacc1) - 
vmagic_bias_less_output_zero_point; output[0] = (int8_t) vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const int32_t vi3 = (int32_t) *i3; const int32_t vk3 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[6]; vacc += vi3 * vk3; const int32_t vi4 = (int32_t) *i4; const int32_t vk4 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[8]; vacc += vi4 * vk4; const int32_t vi5 = (int32_t) *i5; const int32_t vk5 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[10]; vacc += vi5 * vk5; const int32_t vi6 = (int32_t) *i6; const int32_t vk6 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[12]; vacc += vi6 * vk6; const int32_t vi7 = (int32_t) *i7; const int32_t vk7 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[14]; vacc += vi7 * vk7; const int32_t vi8 = (int32_t) *i8; const int32_t vk8 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[16]; vacc += vi8 * vk8; const int32_t vi9 = (int32_t) *i9; const int32_t vk9 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[18]; vacc += vi9 * vk9; const int32_t vi10 = (int32_t) *i10; const int32_t vk10 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[20]; vacc += vi10 * vk10; const int32_t vi11 = (int32_t) *i11; const int32_t vk11 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[22]; vacc += vi11 * vk11; const int32_t vi12 = (int32_t) *i12; const int32_t vk12 = (int32_t) ((const 
int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[24]; vacc += vi12 * vk12; const int32_t vi13 = (int32_t) *i13; const int32_t vk13 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[26]; vacc += vi13 * vk13; const int32_t vi14 = (int32_t) *i14; const int32_t vk14 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[28]; vacc += vi14 * vk14; const int32_t vi15 = (int32_t) *i15; const int32_t vk15 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[30]; vacc += vi15 * vk15; const int32_t vi16 = (int32_t) *i16; const int32_t vk16 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[32]; vacc += vi16 * vk16; const int32_t vi17 = (int32_t) *i17; const int32_t vk17 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[34]; vacc += vi17 * vk17; const int32_t vi18 = (int32_t) *i18; const int32_t vk18 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[36]; vacc += vi18 * vk18; const int32_t vi19 = (int32_t) *i19; const int32_t vk19 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[38]; vacc += vi19 * vk19; const int32_t vi20 = (int32_t) *i20; const int32_t vk20 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[40]; vacc += vi20 * vk20; const int32_t vi21 = (int32_t) *i21; const int32_t vk21 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[42]; vacc += vi21 * vk21; const int32_t vi22 = (int32_t) *i22; const int32_t vk22 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[44]; vacc += vi22 * vk22; const int32_t vi23 = (int32_t) *i23; const int32_t vk23 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[46]; vacc += vi23 * vk23; const int32_t vi24 = (int32_t) *i24; const int32_t vk24 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[48]; vacc += vi24 * vk24; const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 50 * sizeof(int8_t))); float 
vfpacc = (float) vacc * vscale; vfpacc = __builtin_wasm_max_f32(vfpacc, voutput_min_less_zero_point); vfpacc = __builtin_wasm_min_f32(vfpacc, voutput_max_less_zero_point); vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
20,764
37.311808
121
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-3p1c-minmax-fp32-scalar-fmagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p1c__scalar_fmagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_fmagic.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_fmagic.output_max_less_zero_point; const float vmagic_bias = params->fp32_scalar_fmagic.magic_bias; const int32_t vmagic_bias_less_output_zero_point = params->fp32_scalar_fmagic.magic_bias_less_output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; do { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0++; const int32_t vk0 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1++; const int32_t vk1 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[1]; vacc += vi1 * vk1; const int32_t vi2 = 
(int32_t) *i2++; const int32_t vk2 = ((const int8_t*) ((uintptr_t) w + sizeof(int32_t)))[2]; vacc += vi2 * vk2; w = (const void*) ((uintptr_t) w + sizeof(int32_t) + 3 * sizeof(int8_t)); const float vscale = unaligned_load_f32(w); w = (const void*) ((const float*) w + 1); float vfpacc = (float) vacc * vscale; vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point); vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point); vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc) - vmagic_bias_less_output_zero_point; *output++ = (int8_t) vout; } while (--c != 0); output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
2,956
33.383721
114
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-3p2c-minmax-fp32-scalar-imagic.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p2c__scalar_imagic( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float vmagic_bias = params->fp32_scalar_imagic.magic_bias; const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min; const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max; const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += 
vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 += vmagic_bias; vfpacc1 += vmagic_bias; int32_t vout0 = (int32_t) float_as_uint32(vfpacc0); int32_t vout1 = (int32_t) float_as_uint32(vfpacc1); vout0 = math_max_s32(vout0, vmagic_min); vout1 = math_max_s32(vout1, vmagic_min); vout0 = math_min_s32(vout0, vmagic_max); vout1 = math_min_s32(vout1, vmagic_max); vout0 -= vmagic_bias_less_zero_point; vout1 -= vmagic_bias_less_zero_point; output[0] = (int8_t) vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const float vscale = 
unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t))); float vfpacc = (float) vacc * vscale; vfpacc += vmagic_bias; int32_t vout = (int32_t) float_as_uint32(vfpacc); vout = math_max_s32(vout, vmagic_min); vout = math_min_s32(vout, vmagic_max); vout -= vmagic_bias_less_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
4,994
32.3
120
c
XNNPACK
XNNPACK-master/src/qs8-qc8w-dwconv/gen/qs8-qc8w-dwconv-3p2c-minmax-fp32-scalar-lrintf.c
// Auto-generated file. Do not edit! // Template: src/qs8-dwconv/unipass-scalar.c.in // Generator: tools/xngen // // Copyright 2021 Google LLC // // This source code is licensed under the BSD-style license found in the // LICENSE file in the root directory of this source tree. #include <assert.h> #include <math.h> #include <xnnpack/dwconv.h> #include <xnnpack/math.h> #include <xnnpack/unaligned.h> void xnn_qs8_qc8w_dwconv_minmax_fp32_ukernel_3p2c__scalar_lrintf( size_t channels, size_t output_width, const int8_t** input, const void* weights, int8_t* output, intptr_t input_stride, size_t output_increment, size_t input_offset, const int8_t* zero, const union xnn_qs8_qc8w_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) { assert(channels != 0); assert(output_width != 0); const float voutput_min_less_zero_point = params->fp32_scalar_lrintf.output_min_less_zero_point; const float voutput_max_less_zero_point = params->fp32_scalar_lrintf.output_max_less_zero_point; const int32_t voutput_zero_point = params->fp32_scalar_lrintf.output_zero_point; do { const int8_t* i0 = input[0]; assert(i0 != NULL); if XNN_UNPREDICTABLE(i0 != zero) { i0 = (const int8_t*) ((uintptr_t) i0 + input_offset); } const int8_t* i1 = input[1]; assert(i1 != NULL); if XNN_UNPREDICTABLE(i1 != zero) { i1 = (const int8_t*) ((uintptr_t) i1 + input_offset); } const int8_t* i2 = input[2]; assert(i2 != NULL); if XNN_UNPREDICTABLE(i2 != zero) { i2 = (const int8_t*) ((uintptr_t) i2 + input_offset); } input = (const int8_t**) ((uintptr_t) input + input_stride); size_t c = channels; const void* w = weights; for (; c >= 2; c -= 2) { int32_t vacc0 = unaligned_indexed_load_s32(w, 0); int32_t vacc1 = unaligned_indexed_load_s32(w, 1); const int32_t vi0x0 = (int32_t) i0[0]; const int32_t vi0x1 = (int32_t) i0[1]; i0 += 2; const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1]; vacc0 += 
vi0x0 * vk0x0; vacc1 += vi0x1 * vk0x1; const int32_t vi1x0 = (int32_t) i1[0]; const int32_t vi1x1 = (int32_t) i1[1]; i1 += 2; const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3]; vacc0 += vi1x0 * vk1x0; vacc1 += vi1x1 * vk1x1; const int32_t vi2x0 = (int32_t) i2[0]; const int32_t vi2x1 = (int32_t) i2[1]; i2 += 2; const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4]; const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5]; vacc0 += vi2x0 * vk2x0; vacc1 += vi2x1 * vk2x1; w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t)); float vfpacc0 = (float) vacc0; float vfpacc1 = (float) vacc1; const float vscale0 = unaligned_indexed_load_f32(w, 0); const float vscale1 = unaligned_indexed_load_f32(w, 1); w = (const void*) ((const float*) w + 2); vfpacc0 *= vscale0; vfpacc1 *= vscale1; vfpacc0 = math_max_f32(vfpacc0, voutput_min_less_zero_point); vfpacc1 = math_max_f32(vfpacc1, voutput_min_less_zero_point); vfpacc0 = math_min_f32(vfpacc0, voutput_max_less_zero_point); vfpacc1 = math_min_f32(vfpacc1, voutput_max_less_zero_point); const int32_t vrndacc0 = (int32_t) lrintf(vfpacc0); const int32_t vrndacc1 = (int32_t) lrintf(vfpacc1); int32_t vout0 = (int32_t) vrndacc0 + voutput_zero_point; int32_t vout1 = (int32_t) vrndacc1 + voutput_zero_point; output[0] = (int8_t) vout0; output[1] = (int8_t) vout1; output += 2; } if XNN_UNLIKELY(c != 0) { int32_t vacc = unaligned_load_s32(w); const int32_t vi0 = (int32_t) *i0; const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0]; vacc += vi0 * vk0; const int32_t vi1 = (int32_t) *i1; const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2]; vacc += vi1 * vk1; const int32_t vi2 = (int32_t) *i2; const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 
2 * sizeof(int32_t)))[4]; vacc += vi2 * vk2; const float vscale = unaligned_load_f32((const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t))); float vfpacc = (float) vacc * vscale; vfpacc = math_max_f32(vfpacc, voutput_min_less_zero_point); vfpacc = math_min_f32(vfpacc, voutput_max_less_zero_point); const int32_t vrndacc = (int32_t) lrintf(vfpacc); int32_t vout = vrndacc + voutput_zero_point; *output++ = (int8_t) vout; } output = (int8_t*) ((uintptr_t) output + output_increment); } while (--output_width != 0); }
5,073
33.753425
120
c